diff --git a/go.mod b/go.mod index b3c4d4a..32299ab 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,6 @@ module github.com/valyala/quicktemplate go 1.11 require ( - github.com/andybalholm/brotli v1.0.3 // indirect - github.com/klauspost/compress v1.13.5 // indirect github.com/valyala/bytebufferpool v1.0.0 - github.com/valyala/fasthttp v1.30.0 + github.com/valyala/fasthttp v1.34.0 ) diff --git a/go.sum b/go.sum index cc1750d..f63e7c9 100644 --- a/go.sum +++ b/go.sum @@ -1,22 +1,22 @@ -github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/brotli v1.0.3 h1:fpcw+r1N1h0Poc1F/pHbW40cUm/lMEQslZtCkBQ0UnM= -github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4= -github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U= +github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.30.0 h1:nBNzWrgZUUHohyLPU/jTvXdhrcaf2m5k3bWk+3Q049g= -github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= +github.com/valyala/fasthttp v1.34.0 h1:d3AAQJ2DRcxJYHm7OXNXtXt2as1vMDfxeIcFvhmGGm4= +github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/andybalholm/brotli/cluster_command.go b/vendor/github.com/andybalholm/brotli/cluster_command.go index 7449751..45b569b 100644 --- a/vendor/github.com/andybalholm/brotli/cluster_command.go +++ b/vendor/github.com/andybalholm/brotli/cluster_command.go @@ -1,7 +1,5 @@ package brotli -import "math" - /* Copyright 2013 Google Inc. All Rights Reserved. Distributed under MIT license. @@ -164,163 +162,3 @@ func histogramBitCostDistanceCommand(histogram *histogramCommand, candidate *his return populationCostCommand(&tmp) - candidate.bit_cost_ } } - -/* Find the best 'out' histogram for each of the 'in' histograms. - When called, clusters[0..num_clusters) contains the unique values from - symbols[0..in_size), but this property is not preserved in this function. - Note: we assume that out[]->bit_cost_ is already up-to-date. */ -func histogramRemapCommand(in []histogramCommand, in_size uint, clusters []uint32, num_clusters uint, out []histogramCommand, symbols []uint32) { - var i uint - for i = 0; i < in_size; i++ { - var best_out uint32 - if i == 0 { - best_out = symbols[0] - } else { - best_out = symbols[i-1] - } - var best_bits float64 = histogramBitCostDistanceCommand(&in[i], &out[best_out]) - var j uint - for j = 0; j < num_clusters; j++ { - var cur_bits float64 = histogramBitCostDistanceCommand(&in[i], &out[clusters[j]]) - if cur_bits < best_bits { - best_bits = cur_bits - best_out = clusters[j] - } - } - - symbols[i] = best_out - } - - /* Recompute each out based on raw and symbols. */ - for i = 0; i < num_clusters; i++ { - histogramClearCommand(&out[clusters[i]]) - } - - for i = 0; i < in_size; i++ { - histogramAddHistogramCommand(&out[symbols[i]], &in[i]) - } -} - -/* Reorders elements of the out[0..length) array and changes values in - symbols[0..length) array in the following way: - * when called, symbols[] contains indexes into out[], and has N unique - values (possibly N < length) - * on return, symbols'[i] = f(symbols[i]) and - out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length, - where f is a bijection between the range of symbols[] and [0..N), and - the first occurrences of values in symbols'[i] come in consecutive - increasing order. - Returns N, the number of unique values in symbols[]. */ - -var histogramReindexCommand_kInvalidIndex uint32 = math.MaxUint32 - -func histogramReindexCommand(out []histogramCommand, symbols []uint32, length uint) uint { - var new_index []uint32 = make([]uint32, length) - var next_index uint32 - var tmp []histogramCommand - var i uint - for i = 0; i < length; i++ { - new_index[i] = histogramReindexCommand_kInvalidIndex - } - - next_index = 0 - for i = 0; i < length; i++ { - if new_index[symbols[i]] == histogramReindexCommand_kInvalidIndex { - new_index[symbols[i]] = next_index - next_index++ - } - } - - /* TODO: by using idea of "cycle-sort" we can avoid allocation of - tmp and reduce the number of copying by the factor of 2. 
*/ - tmp = make([]histogramCommand, next_index) - - next_index = 0 - for i = 0; i < length; i++ { - if new_index[symbols[i]] == next_index { - tmp[next_index] = out[symbols[i]] - next_index++ - } - - symbols[i] = new_index[symbols[i]] - } - - new_index = nil - for i = 0; uint32(i) < next_index; i++ { - out[i] = tmp[i] - } - - tmp = nil - return uint(next_index) -} - -func clusterHistogramsCommand(in []histogramCommand, in_size uint, max_histograms uint, out []histogramCommand, out_size *uint, histogram_symbols []uint32) { - var cluster_size []uint32 = make([]uint32, in_size) - var clusters []uint32 = make([]uint32, in_size) - var num_clusters uint = 0 - var max_input_histograms uint = 64 - var pairs_capacity uint = max_input_histograms * max_input_histograms / 2 - var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1)) - var i uint - - /* For the first pass of clustering, we allow all pairs. */ - for i = 0; i < in_size; i++ { - cluster_size[i] = 1 - } - - for i = 0; i < in_size; i++ { - out[i] = in[i] - out[i].bit_cost_ = populationCostCommand(&in[i]) - histogram_symbols[i] = uint32(i) - } - - for i = 0; i < in_size; i += max_input_histograms { - var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms) - var num_new_clusters uint - var j uint - for j = 0; j < num_to_combine; j++ { - clusters[num_clusters+j] = uint32(i + j) - } - - num_new_clusters = histogramCombineCommand(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity) - num_clusters += num_new_clusters - } - { - /* For the second pass, we limit the total number of histogram pairs. - After this limit is reached, we only keep searching for the best pair. */ - var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) - if pairs_capacity < (max_num_pairs + 1) { - var _new_size uint - if pairs_capacity == 0 { - _new_size = max_num_pairs + 1 - } else { - _new_size = pairs_capacity - } - var new_array []histogramPair - for _new_size < (max_num_pairs + 1) { - _new_size *= 2 - } - new_array = make([]histogramPair, _new_size) - if pairs_capacity != 0 { - copy(new_array, pairs[:pairs_capacity]) - } - - pairs = new_array - pairs_capacity = _new_size - } - - /* Collapse similar histograms. */ - num_clusters = histogramCombineCommand(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs) - } - - pairs = nil - cluster_size = nil - - /* Find the optimal map from original histograms to the final ones. */ - histogramRemapCommand(in, in_size, clusters, num_clusters, out, histogram_symbols) - - clusters = nil - - /* Convert the context map to a canonical form. 
*/ - *out_size = histogramReindexCommand(out, histogram_symbols, in_size) -} diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment.go b/vendor/github.com/andybalholm/brotli/compress_fragment.go index 2fc2df1..c9bd057 100644 --- a/vendor/github.com/andybalholm/brotli/compress_fragment.go +++ b/vendor/github.com/andybalholm/brotli/compress_fragment.go @@ -604,7 +604,7 @@ emit_commands: assert(candidate < ip) table[hash] = int(ip - base_ip) - if !(!isMatch5(in[ip:], in[candidate:])) { + if isMatch5(in[ip:], in[candidate:]) { break } } diff --git a/vendor/github.com/andybalholm/brotli/decode.go b/vendor/github.com/andybalholm/brotli/decode.go index d2f39a0..6a73b88 100644 --- a/vendor/github.com/andybalholm/brotli/decode.go +++ b/vendor/github.com/andybalholm/brotli/decode.go @@ -50,21 +50,6 @@ const ( decoderErrorUnreachable = -31 ) -/** - * The value of the last error code, negative integer. - * - * All other error code values are in the range from ::lastErrorCode - * to @c -1. There are also 4 other possible non-error codes @c 0 .. @c 3 in - * ::BrotliDecoderErrorCode enumeration. - */ -const lastErrorCode = decoderErrorUnreachable - -/** Options to be used with ::BrotliDecoderSetParameter. */ -const ( - decoderParamDisableRingBufferReallocation = 0 - decoderParamLargeWindow = 1 -) - const huffmanTableBits = 8 const huffmanTableMask = 0xFF @@ -81,28 +66,6 @@ var kCodeLengthPrefixLength = [16]byte{2, 2, 2, 3, 2, 2, 2, 4, 2, 2, 2, 3, 2, 2, var kCodeLengthPrefixValue = [16]byte{0, 4, 3, 2, 0, 4, 3, 1, 0, 4, 3, 2, 0, 4, 3, 5} -func decoderSetParameter(state *Reader, p int, value uint32) bool { - if state.state != stateUninited { - return false - } - switch p { - case decoderParamDisableRingBufferReallocation: - if !(value == 0) { - state.canny_ringbuffer_allocation = 0 - } else { - state.canny_ringbuffer_allocation = 1 - } - return true - - case decoderParamLargeWindow: - state.large_window = (!(value == 0)) - return true - - default: - return false - } -} - /* Saves error code and converts it to BrotliDecoderResult. */ func saveErrorCode(s *Reader, e int) int { s.error_code = int(e) @@ -1125,10 +1088,8 @@ func decodeContextMap(context_map_size uint32, num_htrees *uint32, context_map_a Reads 3..54 bits. */ func decodeBlockTypeAndLength(safe int, s *Reader, tree_type int) bool { var max_block_type uint32 = s.num_block_types[tree_type] - var type_tree []huffmanCode - type_tree = s.block_type_trees[tree_type*huffmanMaxSize258:] - var len_tree []huffmanCode - len_tree = s.block_len_trees[tree_type*huffmanMaxSize26:] + type_tree := s.block_type_trees[tree_type*huffmanMaxSize258:] + len_tree := s.block_len_trees[tree_type*huffmanMaxSize26:] var br *bitReader = &s.br var ringbuffer []uint32 = s.block_type_rb[tree_type*2:] var block_type uint32 @@ -1280,8 +1241,7 @@ func unwrittenBytes(s *Reader, wrap bool) uint { Returns BROTLI_DECODER_NEEDS_MORE_OUTPUT only if there is more output to push and either ring-buffer is as big as window size, or |force| is true. 
*/ func writeRingBuffer(s *Reader, available_out *uint, next_out *[]byte, total_out *uint, force bool) int { - var start []byte - start = s.ringbuffer[s.partial_pos_out&uint(s.ringbuffer_mask):] + start := s.ringbuffer[s.partial_pos_out&uint(s.ringbuffer_mask):] var to_write uint = unwrittenBytes(s, true) var num_written uint = *available_out if num_written > to_write { @@ -1412,8 +1372,7 @@ func copyUncompressedBlockToOutput(available_out *uint, next_out *[]byte, total_ case stateUncompressedWrite: { - var result int - result = writeRingBuffer(s, available_out, next_out, total_out, false) + result := writeRingBuffer(s, available_out, next_out, total_out, false) if result != decoderSuccess { return result } @@ -1931,8 +1890,7 @@ CommandPostDecodeLiterals: } if transform_idx < int(trans.num_transforms) { - var word []byte - word = words.data[offset:] + word := words.data[offset:] var len int = i if transform_idx == int(trans.cutOffTransforms[0]) { copy(s.ringbuffer[pos:], word[:uint(len)]) @@ -1954,10 +1912,8 @@ CommandPostDecodeLiterals: } } else { var src_start int = (pos - s.distance_code) & s.ringbuffer_mask - var copy_dst []byte - copy_dst = s.ringbuffer[pos:] - var copy_src []byte - copy_src = s.ringbuffer[src_start:] + copy_dst := s.ringbuffer[pos:] + copy_src := s.ringbuffer[src_start:] var dst_end int = pos + i var src_end int = src_start + i @@ -2494,8 +2450,6 @@ func decoderDecompressStream(s *Reader, available_in *uint, next_in *[]byte, ava } else { s.state = stateCommandBegin } - - break } else if s.state == stateCommandPostWrite2 { s.state = stateCommandPostWrapCopy /* BROTLI_STATE_COMMAND_INNER_WRITE */ } else { diff --git a/vendor/github.com/andybalholm/brotli/encode.go b/vendor/github.com/andybalholm/brotli/encode.go index 8f9eaa4..8e25a4e 100644 --- a/vendor/github.com/andybalholm/brotli/encode.go +++ b/vendor/github.com/andybalholm/brotli/encode.go @@ -920,8 +920,7 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool { REQUIRED: |header| should be 8-byte aligned and at least 16 bytes long. REQUIRED: |block_size| <= (1 << 24). */ func writeMetadataHeader(s *Writer, block_size uint, header []byte) uint { - var storage_ix uint - storage_ix = uint(s.last_bytes_bits_) + storage_ix := uint(s.last_bytes_bits_) header[0] = byte(s.last_bytes_) header[1] = byte(s.last_bytes_ >> 8) s.last_bytes_ = 0 diff --git a/vendor/github.com/andybalholm/brotli/fast_log.go b/vendor/github.com/andybalholm/brotli/fast_log.go index bbae300..9d6607f 100644 --- a/vendor/github.com/andybalholm/brotli/fast_log.go +++ b/vendor/github.com/andybalholm/brotli/fast_log.go @@ -1,6 +1,9 @@ package brotli -import "math" +import ( + "math" + "math/bits" +) /* Copyright 2013 Google Inc. All Rights Reserved. @@ -11,16 +14,7 @@ import "math" /* Utilities for fast computation of logarithms. 
*/ func log2FloorNonZero(n uint) uint32 { - /* TODO: generalize and move to platform.h */ - var result uint32 = 0 - for { - n >>= 1 - if n == 0 { - break - } - result++ - } - return result + return uint32(bits.Len(n)) - 1 } /* A lookup table for small values of log2(int) to be used in entropy diff --git a/vendor/github.com/andybalholm/brotli/hash.go b/vendor/github.com/andybalholm/brotli/hash.go index 003b433..00f812e 100644 --- a/vendor/github.com/andybalholm/brotli/hash.go +++ b/vendor/github.com/andybalholm/brotli/hash.go @@ -29,8 +29,6 @@ type hasherHandle interface { Store(data []byte, mask uint, ix uint) } -type score_t uint - const kCutoffTransformsCount uint32 = 10 /* 0, 12, 27, 23, 42, 63, 56, 48, 59, 64 */ diff --git a/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go b/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go index 3364c44..306e46d 100644 --- a/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go +++ b/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go @@ -110,8 +110,7 @@ func (h *hashForgetfulChain) Prepare(one_shot bool, input_size uint, data []byte func (h *hashForgetfulChain) Store(data []byte, mask uint, ix uint) { var key uint = h.HashBytes(data[ix&mask:]) var bank uint = key & (h.numBanks - 1) - var idx uint - idx = uint(h.free_slot_idx[bank]) & ((1 << h.bankBits) - 1) + idx := uint(h.free_slot_idx[bank]) & ((1 << h.bankBits) - 1) h.free_slot_idx[bank]++ var delta uint = ix - uint(h.addr[key]) h.tiny_hash[uint16(ix)] = byte(key) diff --git a/vendor/github.com/andybalholm/brotli/hash_rolling.go b/vendor/github.com/andybalholm/brotli/hash_rolling.go index ad655a0..6630fc0 100644 --- a/vendor/github.com/andybalholm/brotli/hash_rolling.go +++ b/vendor/github.com/andybalholm/brotli/hash_rolling.go @@ -48,7 +48,6 @@ type hashRolling struct { state uint32 table []uint32 next_ix uint - chunk_len uint32 factor uint32 factor_remove uint32 } diff --git a/vendor/github.com/andybalholm/brotli/http.go b/vendor/github.com/andybalholm/brotli/http.go index af58670..1e98196 100644 --- a/vendor/github.com/andybalholm/brotli/http.go +++ b/vendor/github.com/andybalholm/brotli/http.go @@ -180,8 +180,8 @@ func init() { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { t |= isSpace } if isChar && !isCtl && !isSeparator { diff --git a/vendor/github.com/andybalholm/brotli/static_dict.go b/vendor/github.com/andybalholm/brotli/static_dict.go index 8e7492d..bc05566 100644 --- a/vendor/github.com/andybalholm/brotli/static_dict.go +++ b/vendor/github.com/andybalholm/brotli/static_dict.go @@ -77,8 +77,7 @@ func findAllStaticDictionaryMatches(dict *encoderDictionary, data []byte, min_le var offset uint = uint(dict.buckets[hash(data)]) var end bool = offset == 0 for !end { - var w dictWord - w = dict.dict_words[offset] + w := dict.dict_words[offset] offset++ var l uint = uint(w.len) & 0x1F var n uint = uint(1) << dict.words.size_bits_by_length[l] @@ -431,8 +430,7 @@ func findAllStaticDictionaryMatches(dict *encoderDictionary, data []byte, min_le var offset uint = uint(dict.buckets[hash(data[1:])]) var end bool = offset == 0 for !end { - var w dictWord - w = dict.dict_words[offset] + w := dict.dict_words[offset] offset++ var l uint = uint(w.len) & 0x1F var n uint = uint(1) << 
dict.words.size_bits_by_length[l] @@ -596,8 +594,7 @@ func findAllStaticDictionaryMatches(dict *encoderDictionary, data []byte, min_le var offset uint = uint(dict.buckets[hash(data[2:])]) var end bool = offset == 0 for !end { - var w dictWord - w = dict.dict_words[offset] + w := dict.dict_words[offset] offset++ var l uint = uint(w.len) & 0x1F var n uint = uint(1) << dict.words.size_bits_by_length[l] @@ -629,8 +626,7 @@ func findAllStaticDictionaryMatches(dict *encoderDictionary, data []byte, min_le var offset uint = uint(dict.buckets[hash(data[5:])]) var end bool = offset == 0 for !end { - var w dictWord - w = dict.dict_words[offset] + w := dict.dict_words[offset] offset++ var l uint = uint(w.len) & 0x1F var n uint = uint(1) << dict.words.size_bits_by_length[l] diff --git a/vendor/github.com/andybalholm/brotli/utf8_util.go b/vendor/github.com/andybalholm/brotli/utf8_util.go index f86de3d..3244247 100644 --- a/vendor/github.com/andybalholm/brotli/utf8_util.go +++ b/vendor/github.com/andybalholm/brotli/utf8_util.go @@ -58,8 +58,7 @@ func isMostlyUTF8(data []byte, pos uint, mask uint, length uint, min_fraction fl var i uint = 0 for i < length { var symbol int - var current_data []byte - current_data = data[(pos+i)&mask:] + current_data := data[(pos+i)&mask:] var bytes_read uint = parseAsUTF8(&symbol, current_data, length-i) i += bytes_read if symbol < 0x110000 { diff --git a/vendor/github.com/andybalholm/brotli/writer.go b/vendor/github.com/andybalholm/brotli/writer.go index 63676b4..39feaef 100644 --- a/vendor/github.com/andybalholm/brotli/writer.go +++ b/vendor/github.com/andybalholm/brotli/writer.go @@ -61,6 +61,7 @@ func (w *Writer) Reset(dst io.Writer) { w.params.lgwin = uint(w.options.LGWin) } w.dst = dst + w.err = nil } func (w *Writer) writeChunk(p []byte, op int) (n int, err error) { diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE index 6cd1e96..87d5574 100644 --- a/vendor/github.com/klauspost/compress/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -290,3 +290,15 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 5283ac5..bffa2f3 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -6,6 +6,7 @@ package flate import ( + "encoding/binary" "fmt" "io" "math" @@ -37,15 +38,17 @@ const ( maxMatchLength = 258 // The longest match for the compressor minOffsetSize = 1 // The shortest offset that makes any sense - // The maximum number of tokens we put into a single flat block, just too - // stop things from getting too large. - maxFlateBlockTokens = 1 << 14 + // The maximum number of tokens we will encode at the time. + // Smaller sizes usually creates less optimal blocks. + // Bigger can make context switching slow. + // We use this for levels 7-9, so we make it big. + maxFlateBlockTokens = 1 << 15 maxStoreBlockSize = 65535 hashBits = 17 // After 17 performance degrades hashSize = 1 << hashBits hashMask = (1 << hashBits) - 1 hashShift = (hashBits + minMatchLength - 1) / minMatchLength - maxHashOffset = 1 << 24 + maxHashOffset = 1 << 28 skipNever = math.MaxInt32 @@ -70,9 +73,9 @@ var levels = []compressionLevel{ {0, 0, 0, 0, 0, 6}, // Levels 7-9 use increasingly more lazy matching // and increasingly stringent conditions for "good enough". - {8, 8, 24, 16, skipNever, 7}, - {10, 16, 24, 64, skipNever, 8}, - {32, 258, 258, 4096, skipNever, 9}, + {8, 12, 16, 24, skipNever, 7}, + {16, 30, 40, 64, skipNever, 8}, + {32, 258, 258, 1024, skipNever, 9}, } // advancedState contains state for the advanced levels, with bigger hash tables, etc. @@ -93,8 +96,9 @@ type advancedState struct { hashOffset int // input window: unprocessed data is window[index:windowEnd] - index int - hashMatch [maxMatchLength + minMatchLength]uint32 + index int + estBitsPerByte int + hashMatch [maxMatchLength + minMatchLength]uint32 hash uint32 ii uint16 // position of last match, intended to overflow to reset. @@ -103,6 +107,7 @@ type advancedState struct { type compressor struct { compressionLevel + h *huffmanEncoder w *huffmanBitWriter // compression algorithm @@ -170,7 +175,8 @@ func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { window = d.window[d.blockStart:index] } d.blockStart = index - d.w.writeBlock(tok, eof, window) + //d.w.writeBlock(tok, eof, window) + d.w.writeBlockDynamic(tok, eof, window, d.sync) return d.w.err } return nil @@ -263,7 +269,7 @@ func (d *compressor) fillWindow(b []byte) { // Try to find a match starting at index whose length is greater than prevSize. // We only look at chainCount possibilities before giving up. // pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { +func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { minMatchLook := maxMatchLength if lookahead < minMatchLook { minMatchLook = lookahead @@ -279,36 +285,75 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead // If we've got a match that's good enough, only look in 1/4 the chain. 
tries := d.chain - length = prevLength - if length >= d.good { - tries >>= 2 - } + length = minMatchLength - 1 wEnd := win[pos+length] wPos := win[pos:] minIndex := pos - windowSize + if minIndex < 0 { + minIndex = 0 + } + offset = 0 + + cGain := 0 + if d.chain < 100 { + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return + } + // Some like it higher (CSV), some like it lower (JSON) + const baseCost = 6 + // Base is 4 bytes at with an additional cost. + // Matches must be better than this. for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { n := matchLen(win[i:i+minMatchLook], wPos) - - if n > length && (n > minMatchLength || pos-i <= 4096) { - length = n - offset = pos - i - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break + if n > length { + // Calculate gain. Estimate + newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) + + //fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n])) + if newGain > cGain { + length = n + offset = pos - i + cGain = newGain + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] } - wEnd = win[pos+n] } } - if i == minIndex { + if i <= minIndex { // hashPrev[i & windowMask] has already been overwritten, so stop now. break } i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex || i < 0 { + if i < minIndex { break } } @@ -327,8 +372,7 @@ func (d *compressor) writeStoredBlock(buf []byte) error { // of the supplied slice. // The caller must ensure that len(b) >= 4. func hash4(b []byte) uint32 { - b = b[:4] - return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits) + return hash4u(binary.LittleEndian.Uint32(b), hashBits) } // bulkHash4 will compute hashes using the same @@ -337,11 +381,12 @@ func bulkHash4(b []byte, dst []uint32) { if len(b) < 4 { return } - hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + hb := binary.LittleEndian.Uint32(b) + dst[0] = hash4u(hb, hashBits) end := len(b) - 4 + 1 for i := 1; i < end; i++ { - hb = (hb << 8) | uint32(b[i+3]) + hb = (hb >> 8) | uint32(b[i+3])<<24 dst[i] = hash4u(hb, hashBits) } } @@ -374,10 +419,21 @@ func (d *compressor) deflateLazy() { if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { return } + if d.windowEnd != s.index && d.chain > 100 { + // Get literal huffman coder. 
+ if d.h == nil { + d.h = newHuffmanEncoder(maxFlateBlockTokens) + } + var tmp [256]uint16 + for _, v := range d.window[s.index:d.windowEnd] { + tmp[v]++ + } + d.h.generate(tmp[:], 15) + } s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) if s.index < s.maxInsertIndex { - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) + s.hash = hash4(d.window[s.index:]) } for { @@ -410,7 +466,7 @@ func (d *compressor) deflateLazy() { } if s.index < s.maxInsertIndex { // Update the hash - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) + s.hash = hash4(d.window[s.index:]) ch := s.hashHead[s.hash&hashMask] s.chainHead = int(ch) s.hashPrev[s.index&windowMask] = ch @@ -426,12 +482,37 @@ func (d *compressor) deflateLazy() { } if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { + if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { s.length = newLength s.offset = newOffset } } + if prevLength >= minMatchLength && s.length <= prevLength { + // Check for better match at end... + // + // checkOff must be >=2 since we otherwise risk checking s.index + // Offset of 2 seems to yield best results. + const checkOff = 2 + prevIndex := s.index - 1 + if prevIndex+prevLength+checkOff < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength { + end = maxMatchLength + } + end += prevIndex + idx := prevIndex + prevLength - (4 - checkOff) + h := hash4(d.window[idx:]) + ch2 := int(s.hashHead[h&hashMask]) - s.hashOffset - prevLength + (4 - checkOff) + if ch2 > minIndex { + length := matchLen(d.window[prevIndex:end], d.window[ch2:]) + // It seems like a pure length metric is best. + if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + } + } + } // There was a match at the previous step, and the current match is // not better. Output the previous match. d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) @@ -479,6 +560,7 @@ func (d *compressor) deflateLazy() { } d.tokens.Reset() } + s.ii = 0 } else { // Reset, if we got a match this run. if s.length >= minMatchLength { @@ -498,13 +580,12 @@ func (d *compressor) deflateLazy() { // If we have a long run of no matches, skip additional bytes // Resets when s.ii overflows after 64KB. - if s.ii > 31 { - n := int(s.ii >> 5) + if n := int(s.ii) - d.chain; n > 0 { + n = 1 + int(n>>6) for j := 0; j < n; j++ { if s.index >= d.windowEnd-1 { break } - d.tokens.AddLiteral(d.window[s.index-1]) if d.tokens.n == maxFlateBlockTokens { if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { @@ -512,6 +593,14 @@ func (d *compressor) deflateLazy() { } d.tokens.Reset() } + // Index... 
+ if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } s.index++ } // Flush last byte @@ -611,7 +700,9 @@ func (d *compressor) write(b []byte) (n int, err error) { } n = len(b) for len(b) > 0 { - d.step(d) + if d.windowEnd == len(d.window) || d.sync { + d.step(d) + } b = b[d.fill(d, b):] if d.err != nil { return 0, d.err @@ -652,13 +743,13 @@ func (d *compressor) init(w io.Writer, level int) (err error) { level = 5 fallthrough case level >= 1 && level <= 6: - d.w.logNewTablePenalty = 8 + d.w.logNewTablePenalty = 7 d.fast = newFastEnc(level) d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillBlock d.step = (*compressor).storeFast case 7 <= level && level <= 9: - d.w.logNewTablePenalty = 10 + d.w.logNewTablePenalty = 8 d.state = &advancedState{} d.compressionLevel = levels[level] d.initDeflate() diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 347ac2c..d55ea2a 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -179,7 +179,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 { // matchlenLong will return the match length between offsets and t in src. // It is assumed that s > t, that t >=0 and s < len(src). func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDecode { + if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) } @@ -213,26 +213,15 @@ func (e *fastGen) Reset() { // matchLen returns the maximum length. // 'a' must be the shortest of the two. func matchLen(a, b []byte) int { - b = b[:len(a)] var checked int - if len(a) >= 4 { - // Try 4 bytes first - if diff := binary.LittleEndian.Uint32(a) ^ binary.LittleEndian.Uint32(b); diff != 0 { - return bits.TrailingZeros32(diff) >> 3 - } - // Switch to 8 byte matching. - checked = 4 - a = a[4:] - b = b[4:] - for len(a) >= 8 { - b = b[:len(a)] - if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { - return checked + (bits.TrailingZeros64(diff) >> 3) - } - checked += 8 - a = a[8:] - b = b[8:] + + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + return checked + (bits.TrailingZeros64(diff) >> 3) } + checked += 8 + a = a[8:] + b = b[8:] } b = b[:len(a)] for i := range a { diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 3ad5e98..25f6d11 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "fmt" "io" + "math" ) const ( @@ -24,6 +25,10 @@ const ( codegenCodeCount = 19 badCode = 255 + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + // bufferFlushSize indicates the buffer size // after which bytes are flushed to the writer. // Should preferably be a multiple of 6, since @@ -36,8 +41,11 @@ const ( bufferSize = bufferFlushSize + 8 ) +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + // The number of extra bits needed by length code X - LENGTH_CODES_START. 
-var lengthExtraBits = [32]int8{ +var lengthExtraBits = [32]uint8{ /* 257 */ 0, 0, 0, /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, @@ -51,19 +59,22 @@ var lengthBase = [32]uint8{ 64, 80, 96, 112, 128, 160, 192, 224, 255, } +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + // offset code word extra bits. -var offsetExtraBits = [64]int8{ +var offsetExtraBits = [32]int8{ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, /* extended window */ - 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, + 14, 14, } var offsetCombined = [32]uint32{} func init() { - var offsetBase = [64]uint32{ + var offsetBase = [32]uint32{ /* normal deflate */ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, @@ -73,17 +84,15 @@ func init() { 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, /* extended window */ - 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, - 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, - 0x100000, 0x180000, 0x200000, 0x300000, + 0x008000, 0x00c000, } for i := range offsetCombined[:] { // Don't use extended window values... - if offsetBase[i] > 0x006000 { + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { continue } - offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i]) + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) } } @@ -99,7 +108,7 @@ type huffmanBitWriter struct { // Data waiting to be written is bytes[0:nbytes] // and then the low nbits of bits. bits uint64 - nbits uint16 + nbits uint8 nbytes uint8 lastHuffMan bool literalEncoding *huffmanEncoder @@ -155,37 +164,33 @@ func (w *huffmanBitWriter) reset(writer io.Writer) { w.lastHuffMan = false } -func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) { - offsets, lits = true, true +func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { a := t.offHist[:offsetCodeCount] - b := w.offsetFreq[:len(a)] - for i := range a { - if b[i] == 0 && a[i] != 0 { - offsets = false - break + b := w.offsetEncoding.codes + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].len == 0 { + return false } } a = t.extraHist[:literalCount-256] - b = w.literalFreq[256:literalCount] + b = w.literalEncoding.codes[256:literalCount] b = b[:len(a)] - for i := range a { - if b[i] == 0 && a[i] != 0 { - lits = false - break + for i, v := range a { + if v != 0 && b[i].len == 0 { + return false } } - if lits { - a = t.litHist[:] - b = w.literalFreq[:len(a)] - for i := range a { - if b[i] == 0 && a[i] != 0 { - lits = false - break - } + + a = t.litHist[:256] + b = w.literalEncoding.codes[:len(a)] + for i, v := range a { + if v != 0 && b[i].len == 0 { + return false } } - return + return true } func (w *huffmanBitWriter) flush() { @@ -221,8 +226,8 @@ func (w *huffmanBitWriter) write(b []byte) { _, w.err = w.writer.Write(b) } -func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { - w.bits |= uint64(b) << w.nbits +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { + w.bits |= uint64(b) << (w.nbits & 63) w.nbits += nb if w.nbits >= 48 { w.writeOutBits() @@ -423,7 +428,7 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { func (w *huffmanBitWriter) writeCode(c hcode) { // The function does not get inlined if we "& 63" the shift. 
- w.bits |= uint64(c.code) << w.nbits + w.bits |= uint64(c.code) << (w.nbits & 63) w.nbits += c.len if w.nbits >= 48 { w.writeOutBits() @@ -566,7 +571,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { w.lastHeader = 0 } numLiterals, numOffsets := w.indexTokens(tokens, false) - w.generate(tokens) + w.generate() var extraBits int storedSize, storable := w.storedSize(input) if storable { @@ -577,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { // Fixed Huffman baseline. var literalEncoding = fixedLiteralEncoding var offsetEncoding = fixedOffsetEncoding - var size = w.fixedSize(extraBits) + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } // Dynamic Huffman? var numCodegens int @@ -595,7 +603,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { } // Stored bytes? - if storable && storedSize < size { + if storable && storedSize <= size { w.writeStoredHeader(len(input), eof) w.writeBytes(input) return @@ -634,22 +642,39 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.lastHeader = 0 w.lastHuffMan = false } - if !sync { - tokens.Fill() + + // fillReuse enables filling of empty values. + // This will make encodings always reusable without testing. + // However, this does not appear to benefit on most cases. + const fillReuse = false + + // Check if we can reuse... + if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 } + numLiterals, numOffsets := w.indexTokens(tokens, !sync) + extraBits := 0 + ssize, storable := w.storedSize(input) + + const usePrefs = true + if storable || w.lastHeader > 0 { + extraBits = w.extraBitSize() + } var size int + // Check if we should reuse. if w.lastHeader > 0 { // Estimate size for using a new table. // Use the previous header size as the best estimate. newSize := w.lastHeader + tokens.EstimatedBits() - newSize += newSize >> w.logNewTablePenalty + newSize += int(w.literalEncoding.codes[endBlockMarker].len) + newSize>>w.logNewTablePenalty // The estimated size is calculated as an optimal table. // We add a penalty to make it more realistic and re-use a bit more. - reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize() + reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits // Check if a new table is better. if newSize < reuseSize { @@ -660,35 +685,83 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b } else { size = reuseSize } + + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } // Check if we get a reasonable size decrease. 
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + if storable && ssize <= size { w.writeStoredHeader(len(input), eof) w.writeBytes(input) - w.lastHeader = 0 return } } // We want a new block/table if w.lastHeader == 0 { - w.generate(tokens) + if fillReuse && !sync { + w.fillTokens() + numLiterals, numOffsets = maxNumLit, maxNumDist + } else { + w.literalFreq[endBlockMarker] = 1 + } + + w.generate() // Generate codegen and codegenFrequencies, which indicates how to encode // the literalEncoding and the offsetEncoding. w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) w.codegenEncoding.generate(w.codegenFreq[:], 7) + var numCodegens int - size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize()) - // Store bytes, if we don't get a reasonable improvement. - if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + if fillReuse && !sync { + // Reindex for accurate size... + w.indexTokens(tokens, true) + } + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + // Store predefined, if we don't get a reasonable improvement. + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. + if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + + if storable && ssize <= size { + // Store bytes, if we don't get an improvement. w.writeStoredHeader(len(input), eof) w.writeBytes(input) - w.lastHeader = 0 return } // Write Huffman table. w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - w.lastHeader, _ = w.headerSize() + if !sync { + w.lastHeader, _ = w.headerSize() + } w.lastHuffMan = false } @@ -699,6 +772,19 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) } +func (w *huffmanBitWriter) fillTokens() { + for i, v := range w.literalFreq[:literalCount] { + if v == 0 { + w.literalFreq[i] = 1 + } + } + for i, v := range w.offsetFreq[:offsetCodeCount] { + if v == 0 { + w.offsetFreq[i] = 1 + } + } +} + // indexTokens indexes a slice of tokens, and updates // literalFreq and offsetFreq, and generates literalEncoding // and offsetEncoding. 
@@ -733,7 +819,7 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num return } -func (w *huffmanBitWriter) generate(t *tokens) { +func (w *huffmanBitWriter) generate() { w.literalEncoding.generate(w.literalFreq[:literalCount], 15) w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) } @@ -765,10 +851,10 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits, nbits, nbytes := w.bits, w.nbits, w.nbytes for _, t := range tokens { - if t < matchType { + if t < 256 { //w.writeCode(lits[t.literal()]) - c := lits[t.literal()] - bits |= uint64(c.code) << nbits + c := lits[t] + bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) @@ -790,13 +876,13 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the length length := t.length() - lengthCode := lengthCode(length) + lengthCode := lengthCode(length) & 31 if false { - w.writeCode(lengths[lengthCode&31]) + w.writeCode(lengths[lengthCode]) } else { // inlined - c := lengths[lengthCode&31] - bits |= uint64(c.code) << nbits + c := lengths[lengthCode] + bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) @@ -815,11 +901,11 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } - extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) - if extraLengthBits > 0 { + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode&31]) - bits |= uint64(extraLength) << nbits + extraLength := int32(length - lengthBase[lengthCode]) + bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) @@ -839,14 +925,13 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } // Write the offset offset := t.offset() - offsetCode := offset >> 16 - offset &= matchOffsetOnlyMask + offsetCode := (offset >> 16) & 31 if false { - w.writeCode(offs[offsetCode&31]) + w.writeCode(offs[offsetCode]) } else { // inlined c := offs[offsetCode] - bits |= uint64(c.code) << nbits + bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) @@ -864,11 +949,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } } - offsetComb := offsetCombined[offsetCode] - if offsetComb > 1<<16 { + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits - nbits += uint16(offsetComb >> 16) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -934,6 +1020,29 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // https://stackoverflow.com/a/25454430 const guessHeaderSizeBits = 70 * 8 histogram(input, w.literalFreq[:numLiterals], fill) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. 
+ abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } w.literalFreq[endBlockMarker] = 1 w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) if fill { @@ -951,8 +1060,10 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { estBits += estBits >> w.logNewTablePenalty // Store bytes, if we don't get a reasonable improvement. - ssize, storable := w.storedSize(input) if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } w.writeStoredHeader(len(input), eof) w.writeBytes(input) return @@ -963,7 +1074,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { if estBits < reuseSize { if debugDeflate { - //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") } // We owe an EOB w.writeCode(w.literalEncoding.codes[endBlockMarker]) @@ -996,14 +1107,44 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { encoding := w.literalEncoding.codes[:256] // Go 1.16 LOVES having these on stack. At least 1.5x the speed. bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - for _, t := range input { - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= uint64(c.code) << nbits - nbits += c.len - if debugDeflate { - count += int(c.len) + + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } + // Unroll, write 3 codes/loop. + // Fastest number of unrolls. + for len(input) > 3 { + // We must have at least 48 bits free. + if nbits >= 8 { + n := nbits >> 3 + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + bits >>= (n * 8) & 63 + nbits -= n * 8 + nbytes += n + } + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 } + a, b := encoding[input[0]], encoding[input[1]] + bits |= uint64(a.code) << (nbits & 63) + bits |= uint64(b.code) << ((nbits + a.len) & 63) + c := encoding[input[2]] + nbits += b.len + a.len + bits |= uint64(c.code) << (nbits & 63) + nbits += c.len + input = input[3:] + } + + // Remaining... + for _, t := range input { if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -1015,17 +1156,33 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= uint64(c.code) << (nbits & 63) + nbits += c.len + if debugDeflate { + count += int(c.len) + } } // Restore... w.bits, w.nbits, w.nbytes = bits, nbits, nbytes if debugDeflate { - fmt.Println("wrote", count/8, "bytes") + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") } + // Flush if needed to have space. 
+ if w.nbits >= 48 { + w.writeOutBits() + } + if eof || sync { w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 67b2b38..9ab497c 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -17,7 +17,8 @@ const ( // hcode is a huffman code with a bit code and bit length. type hcode struct { - code, len uint16 + code uint16 + len uint8 } type huffmanEncoder struct { @@ -56,7 +57,7 @@ type levelInfo struct { } // set sets the code and length of an hcode. -func (h *hcode) set(code uint16, length uint16) { +func (h *hcode) set(code uint16, length uint8) { h.len = length h.code = code } @@ -80,7 +81,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { var ch uint16 for ch = 0; ch < literalCount; ch++ { var bits uint16 - var size uint16 + var size uint8 switch { case ch < 144: // size 8, 000110000 .. 10111111 @@ -99,7 +100,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { bits = ch + 192 - 280 size = 8 } - codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + codes[ch] = hcode{code: reverseBits(bits, size), len: size} } return h } @@ -129,9 +130,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int { func (h *huffmanEncoder) bitLengthRaw(b []byte) int { var total int for _, f := range b { - if f != 0 { - total += int(h.codes[f].len) - } + total += int(h.codes[f].len) } return total } @@ -189,14 +188,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // of the level j ancestor. var leafCounts [maxBitsLimit][maxBitsLimit]int32 + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + for level := int32(1); level <= maxBits; level++ { // For every level, the first two items are the first two characters. // We initialize the levels as if we had already figured this out. levels[level] = levelInfo{ level: level, - lastFreq: int32(list[1].freq), - nextCharFreq: int32(list[2].freq), - nextPairFreq: int32(list[0].freq) + int32(list[1].freq), + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, } leafCounts[level][level] = 2 if level == 1 { @@ -207,8 +211,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // We need a total of 2*n - 2 items at top level and have already generated 2. levels[maxBits].needed = 2*n - 4 - level := maxBits - for { + level := uint32(maxBits) + for level < 16 { l := &levels[level] if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { // We've run out of both leafs and pairs. @@ -240,7 +244,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // more values in the level below l.lastFreq = l.nextPairFreq // Take leaf counts from the lower level, except counts[level] remains the same. 
- copy(leafCounts[level][:level], leafCounts[level-1][:level]) + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } levels[l.level-1].needed = 2 } @@ -298,7 +308,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN sortByLiteral(chunk) for _, node := range chunk { - h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint8(n)} code++ } list = list[0 : len(list)-int(bits)] @@ -311,6 +321,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // maxBits The maximum number of bits to use for any literal. func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] // Number of non-zero literals count := 0 // Set list to be the set of all non-zero literals and their frequencies @@ -319,11 +330,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list[count] = literalNode{uint16(i), f} count++ } else { - list[count] = literalNode{} - h.codes[i].len = 0 + codes[i].len = 0 } } - list[len(freq)] = literalNode{} + list[count] = literalNode{} list = list[:count] if count <= 2 { diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index d1edb35..414c0be 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -36,6 +36,13 @@ type lengthExtra struct { var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + // Initialize the fixedHuffmanDecoder only once upon first use. 
var fixedOnce sync.Once var fixedHuffmanDecoder huffmanDecoder @@ -328,11 +335,17 @@ func (f *decompressor) nextBlock() { switch typ { case 0: f.dataBlock() + if debugDecode { + fmt.Println("stored block") + } case 1: // compressed, fixed Huffman tables f.hl = &fixedHuffmanDecoder f.hd = nil f.huffmanBlockDecoder()() + if debugDecode { + fmt.Println("predefinied huffman block") + } case 2: // compressed, dynamic Huffman tables if f.err = f.readHuffman(); f.err != nil { @@ -341,6 +354,9 @@ func (f *decompressor) nextBlock() { f.hl = &f.h1 f.hd = &f.h2 f.huffmanBlockDecoder()() + if debugDecode { + fmt.Println("dynamic huffman block") + } default: // 3 is reserved. if debugDecode { @@ -550,221 +566,6 @@ func (f *decompressor) readHuffman() error { return nil } -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBlockGeneric() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
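The comment above describes the key trick in the rewritten decoders: copy `f.b`/`f.nb` into locals so they live in registers through the hot loop, and write them back on every exit path. A condensed illustration of that pattern, using our own stand-in type rather than the library's `decompressor`:

```go
package main

import "fmt"

// bitReader is a stand-in for the decompressor's bit-buffer fields.
type bitReader struct {
	b  uint32 // bit buffer, LSB first
	nb uint   // number of valid bits in b
	in []byte
}

// need tops up the buffer to at least n bits. The hot fields are copied into
// fb/fnb at entry and copied back on every exit path, mirroring the fb/fnb
// pattern used by the regenerated decoders in this diff.
func (r *bitReader) need(n uint) bool {
	fb, fnb := r.b, r.nb
	for fnb < n {
		if len(r.in) == 0 {
			r.b, r.nb = fb, fnb // write back before returning
			return false
		}
		fb |= uint32(r.in[0]) << (fnb & 31)
		r.in = r.in[1:]
		fnb += 8
	}
	r.b, r.nb = fb, fnb
	return true
}

func main() {
	r := &bitReader{in: []byte{0xAB, 0xCD}}
	fmt.Printf("%v %d %#x\n", r.need(12), r.nb, r.b) // true 16 0xcdab
}
```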
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - f.err = noEOF(err) - return - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var n uint // number of bits extra - var length int - var err error - switch { - case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric - f.stepState = stateInit - return - } - goto readLiteral - case v == 256: - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 - case v < maxNumLit: - length = 258 - n = 0 - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - return - } - if n > 0 { - for f.nb < n { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - } - - var dist uint32 - if f.hd == nil { - for f.nb < 5 { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 - } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) - } - f.err = err - return - } - dist = uint32(sym) - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - default: - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { - if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. 
- { - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work - f.stepState = stateDict - return - } - goto readLiteral - } -} - // Copy a single uncompressed data block from input to output. func (f *decompressor) dataBlock() { // Uncompressed. diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index cc6db27..8d632ce 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -21,6 +21,11 @@ func (f *decompressor) huffmanBytesBuffer() { ) fr := f.r.(*bytes.Buffer) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -39,41 +44,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -88,10 +87,12 @@ readLiteral: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -101,9 +102,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -111,25 +113,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return 
} var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -137,12 +141,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -152,38 +156,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -197,9 +198,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -224,6 +228,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -248,10 +253,12 @@ copyHistory: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -265,6 +272,11 @@ func (f *decompressor) huffmanBytesReader() { ) fr := f.r.(*bytes.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -283,41 +295,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. 
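For fixed-Huffman blocks (`f.hd == nil`) the code above reads five bits and runs them through `bits.Reverse8`, because distance codes are stored most-significant-bit first while the bit buffer delivers bits LSB-first. A small demonstration of why that expression works:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Fixed-Huffman distance codes are 5 bits stored MSB-first, while the
	// buffer fb delivers bits LSB-first, so the decoder reverses them:
	//
	//   dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
	//
	// & and << share a precedence level in Go, so this is (fb & 0x1F) << 3:
	// the five low bits move to positions 3..7 and Reverse8 mirrors them
	// into positions 4..0.
	fb := uint32(0b10110) // bits arrived in the order 0,1,1,0,1
	dist := uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
	fmt.Printf("%05b -> %05b\n", fb, dist) // 10110 -> 01101
}
```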
n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -332,10 +338,12 @@ readLiteral: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -345,9 +353,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -355,25 +364,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -381,12 +392,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -396,38 +407,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -441,9 +449,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -468,6 +479,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -492,10 +504,12 @@ copyHistory: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -509,6 +523,11 @@ func (f *decompressor) huffmanBufioReader() { ) fr := f.r.(*bufio.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -527,41 +546,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -576,10 +589,12 @@ readLiteral: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBufioReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -589,9 +604,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -599,25 +615,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -625,12 +643,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -640,38 +658,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -685,9 +700,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -712,6 +730,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -736,10 +755,12 @@ copyHistory: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBufioReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -753,6 +774,11 @@ func (f *decompressor) huffmanStringsReader() { ) fr := f.r.(*strings.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -771,41 +797,286 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. 
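The distance reconstruction that repeats in each generated decoder above is the RFC 1951 distance table expressed arithmetically: a symbol `sym >= 4` is followed by `(sym-2)>>1` extra bits, and its range starts at `1<<(nb+1) + 1 + ((sym&1)<<nb)`. A standalone check of that arithmetic (the helper name is ours, purely for illustration):

```go
package main

import "fmt"

// distFromSymbol redoes the arithmetic used in the decode loops above: for a
// DEFLATE distance symbol sym >= 4, nb = (sym-2)>>1 extra bits follow in the
// stream, and the final distance is 1<<(nb+1) + 1 + ((sym&1)<<nb) + extra.
func distFromSymbol(sym, extraBits uint32) uint32 {
	nb := uint(sym-2) >> 1
	extra := (sym & 1) << nb
	extra |= extraBits
	return 1<<(nb+1) + 1 + extra
}

func main() {
	// Symbol 9 carries 3 extra bits and covers distances 25..32 (RFC 1951, 3.2.5).
	fmt.Println(distFromSymbol(9, 0), distFromSymbol(9, 7)) // 25 32
}
```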
n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanGenericReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hl.maxRead) + for { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -818,12 +1089,14 @@ readLiteral: f.dict.writeByte(byte(v)) if f.dict.availWrite() == 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader + f.step = (*decompressor).huffmanGenericReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -833,9 +1106,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -843,25 +1117,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -869,12 +1145,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -884,38 +1160,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -929,9 +1202,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -956,6 +1232,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -978,12 +1255,14 @@ copyHistory: if f.dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.step = (*decompressor).huffmanGenericReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } func (f *decompressor) huffmanBlockDecoder() func() { @@ -996,7 +1275,9 @@ func (f *decompressor) huffmanBlockDecoder() func() { return f.huffmanBufioReader case *strings.Reader: return f.huffmanStringsReader + case Reader: + return f.huffmanGenericReader default: - return f.huffmanBlockGeneric + return f.huffmanGenericReader } } diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go index 1e5eea3..0f14f8d 100644 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -1,6 +1,10 @@ package flate -import "fmt" +import ( + "encoding/binary" + "fmt" + "math/bits" +) // fastGen maintains the table for matches, // and the previous byte block for level 2. @@ -116,7 +120,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. 
t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + var l = int32(4) + if false { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else { + // inlined: + a := src[s+4:] + b := src[t+4:] + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + l += int32(bits.TrailingZeros64(diff) >> 3) + break + } + l += 8 + a = a[8:] + b = b[8:] + } + if len(a) < 8 { + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + break + } + l++ + } + } + } // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { @@ -125,11 +154,43 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + if false { + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + } else { + // Inlined... + xoffset := uint32(s - t - baseMatchOffset) + xlength := l + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + dst.extraHist[lengthCodes1[uint8(xl)]]++ + dst.offHist[oc]++ + dst.tokens[dst.n] = token(matchType | uint32(xl)<= s { diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 234c438..8603fbd 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -134,7 +134,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index c22b424..039639f 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -5,7 +5,7 @@ import "fmt" // fastEncL3 type fastEncL3 struct { fastGen - table [tableSize]tableEntryPrev + table [1 << 16]tableEntryPrev } // Encode uses a similar algorithm to level 2, will check up to two candidates. @@ -13,6 +13,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { const ( inputMargin = 8 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits ) if debugDeflate && e.cur < 0 { @@ -73,7 +75,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry for { - nextHash := hash(cv) + nextHash := hash4u(cv, tableBits) s = nextS nextS = s + 1 + (s-nextEmit)>>skipLog if nextS > sLimit { @@ -141,7 +143,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) @@ -156,7 +166,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // Index first pair after match end. 
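The level-1 encoder above inlines its match-length search: XOR eight bytes at a time and turn the trailing-zero count of the first non-zero difference into a byte index. A self-contained restatement of the idea (a sketch, not the library's exact `matchlenLong`):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the length of the common prefix of a and b, comparing
// 8 bytes at a time; the trailing zero count of the XOR locates the first
// differing byte within a 64-bit word.
func matchLen(a, b []byte) int {
	n := 0
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	// Byte-by-byte tail for the last few bytes.
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("deflate stream"), []byte("deflate strait"))) // 11
}
```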
if int(t+4) < len(src) && t > 0 { cv := load3232(src, t) - nextHash := hash(cv) + nextHash := hash4u(cv, tableBits) e.table[nextHash] = tableEntryPrev{ Prev: e.table[nextHash].Cur, Cur: tableEntry{offset: e.cur + t}, @@ -165,30 +175,31 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { goto emitRemainder } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-3 to s. - x := load6432(src, s-3) - prevHash := hash(uint32(x)) - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 3}, + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 5 { + nextHash := hash4u(load3232(src, i), tableBits) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} } - x >>= 8 - prevHash = hash(uint32(x)) + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hash4u(uint32(x), tableBits) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 2}, } x >>= 8 - prevHash = hash(uint32(x)) + prevHash = hash4u(uint32(x), tableBits) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 1}, } x >>= 8 - currHash := hash(uint32(x)) + currHash := hash4u(uint32(x), tableBits) candidates := e.table[currHash] cv = uint32(x) e.table[currHash] = tableEntryPrev{ @@ -200,15 +211,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { candidate = candidates.Cur minOffset := e.cur + s - (maxMatchOffset - 4) - if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) { - // We only check if value mismatches. - // Offset will always be invalid in other cases. + if candidate.offset > minOffset { + if cv == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } candidate = candidates.Prev if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } + // Match at prev... 
+ continue } } cv = uint32(x >> 8) diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index e62f0c0..1cbffa1 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -135,7 +135,15 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 293a3a3..4b97576 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -210,7 +210,15 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index a709977..62888ed 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -243,7 +243,15 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if false { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 53e8991..544162a 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -249,7 +249,15 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index eb862d7..d818790 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -13,11 +13,10 @@ import ( ) const ( - // From top - // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused - // 8 bits: xlength = length - MIN_MATCH_LENGTH - // 5 bits offsetcode - // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits + // bits 16-22 offsetcode - 5 bits + // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits + // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits lengthShift = 22 offsetMask = 1<maxnumlit offHist [32]uint16 // offset codes litHist [256]uint16 // codes 0->255 - n uint16 // Must be able to contain maxStoreBlockSize + nFilled int + n uint16 // Must be able to contain maxStoreBlockSize tokens [maxStoreBlockSize + 1]token } @@ -142,7 +141,7 @@ func (t *tokens) Reset() { return } t.n = 0 - t.nLits = 0 + t.nFilled = 
0 for i := range t.litHist[:] { t.litHist[i] = 0 } @@ -161,12 +160,12 @@ func (t *tokens) Fill() { for i, v := range t.litHist[:] { if v == 0 { t.litHist[i] = 1 - t.nLits++ + t.nFilled++ } } for i, v := range t.extraHist[:literalCount-256] { if v == 0 { - t.nLits++ + t.nFilled++ t.extraHist[i] = 1 } } @@ -196,20 +195,17 @@ func (t *tokens) indexTokens(in []token) { // emitLiteral writes a literal chunk and returns the number of bytes written. func emitLiteral(dst *tokens, lit []byte) { - ol := int(dst.n) - for i, v := range lit { - dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) + for _, v := range lit { + dst.tokens[dst.n] = token(v) dst.litHist[v]++ + dst.n++ } - dst.n += uint16(len(lit)) - dst.nLits += len(lit) } func (t *tokens) AddLiteral(lit byte) { t.tokens[t.n] = token(lit) t.litHist[lit]++ t.n++ - t.nLits++ } // from https://stackoverflow.com/a/28730362 @@ -230,8 +226,9 @@ func (t *tokens) EstimatedBits() int { shannon := float32(0) bits := int(0) nMatches := 0 - if t.nLits > 0 { - invTotal := 1.0 / float32(t.nLits) + total := int(t.n) + t.nFilled + if total > 0 { + invTotal := 1.0 / float32(total) for _, v := range t.litHist[:] { if v > 0 { n := float32(v) @@ -275,10 +272,9 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { } oCode := offsetCode(xoffset) xoffset |= oCode << 16 - t.nLits++ t.extraHist[lengthCodes1[uint8(xlength)]]++ - t.offHist[oCode]++ + t.offHist[oCode&31]++ t.tokens[t.n] = token(matchType | xlength< 258 { // We need to have at least baseMatchLength left over for next loop. - xl = 258 - baseMatchLength + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } } xlength -= xl xl -= baseMatchLength - t.nLits++ t.extraHist[lengthCodes1[uint8(xl)]]++ - t.offHist[oc]++ + t.offHist[oc&31]++ t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } -// The code is never more than 8 bits, but is returned as uint32 for convenience. -func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) } +// Convert length to code. +func lengthCode(len uint8) uint8 { return lengthCodes[len] } // Returns the offset code corresponding to a specific offset func offsetCode(off uint32) uint32 { diff --git a/vendor/github.com/valyala/fasthttp/README.md b/vendor/github.com/valyala/fasthttp/README.md index 3b47385..c11d9ce 100644 --- a/vendor/github.com/valyala/fasthttp/README.md +++ b/vendor/github.com/valyala/fasthttp/README.md @@ -4,6 +4,12 @@ Fast HTTP implementation for Go. +# fasthttp might not be for you! +fasthttp was design for some high performance edge cases. **Unless** your server/client needs to handle **thousands of small to medium requests per seconds** and needs a consistent low millisecond response time fasthttp might not be for you. **For most cases `net/http` is much better** as it's easier to use and can handle more cases. For most cases you won't even notice the performance difference. + + +## General info and links + Currently fasthttp is successfully used by [VertaMedia](https://vertamedia.com/) in a production serving up to 200K rps from more than 1.5M concurrent keep-alive connections per physical server. @@ -34,7 +40,7 @@ connections per physical server. [FAQ](#faq) -# HTTP server performance comparison with [net/http](https://golang.org/pkg/net/http/) +## HTTP server performance comparison with [net/http](https://golang.org/pkg/net/http/) In short, fasthttp server is up to 10 times faster than net/http. Below are benchmark results. 
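The `token.go` hunk earlier in this diff spells out the token bit layout; in particular `offsetMask` is `1<<lengthShift - 1`, i.e. the low 22 bits. A simplified sketch of packing and unpacking a match token under that layout (the real tokens also carry the 5-bit offset code in bits 16-21):

```go
package main

import "fmt"

// Layout from token.go above: offset (and offset code) in the low 22 bits,
// length-3 in bits 22-29, and the 2-bit token type in bits 30-31.
const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	matchType   = 1 << 30
)

type token uint32

func newMatch(xlength, xoffset uint32) token {
	return token(matchType | xlength<<lengthShift | xoffset)
}

func (t token) length() uint32 { return uint32(t-matchType) >> lengthShift }
func (t token) offset() uint32 { return uint32(t) & offsetMask }

func main() {
	// A 10-byte match (xlength = 10-3 = 7) at offset 4096.
	t := newMatch(7, 4096)
	fmt.Println(t.length(), t.offset()) // 7 4096
}
```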
@@ -95,7 +101,7 @@ BenchmarkServerGet10ReqPerConn10KClients-4 30000000 346 ns/ BenchmarkServerGet100ReqPerConn10KClients-4 50000000 282 ns/op 0 B/op 0 allocs/op ``` -# HTTP client comparison with net/http +## HTTP client comparison with net/http In short, fasthttp client is up to 10 times faster than net/http. Below are benchmark results. @@ -157,14 +163,14 @@ BenchmarkClientGetEndToEnd1000Inmemory-4 10000000 1316 ns/ ``` -# Install +## Install ``` go get -u github.com/valyala/fasthttp ``` -# Switching from net/http to fasthttp +## Switching from net/http to fasthttp Unfortunately, fasthttp doesn't provide API identical to net/http. See the [FAQ](#faq) for details. @@ -393,7 +399,7 @@ instead of [html/template](https://golang.org/pkg/html/template/). [expvarhandler](https://godoc.org/github.com/valyala/fasthttp/expvarhandler). -# Performance optimization tips for multi-core systems +## Performance optimization tips for multi-core systems * Use [reuseport](https://godoc.org/github.com/valyala/fasthttp/reuseport) listener. * Run a separate server instance per CPU core with GOMAXPROCS=1. @@ -403,7 +409,7 @@ instead of [html/template](https://golang.org/pkg/html/template/). * Use the latest version of Go as each version contains performance improvements. -# Fasthttp best practices +## Fasthttp best practices * Do not allocate objects and `[]byte` buffers - just reuse them as much as possible. Fasthttp API design encourages this. @@ -424,7 +430,7 @@ instead of [html/template](https://golang.org/pkg/html/template/). [html/template](https://golang.org/pkg/html/template/) in your webserver. -# Tricks with `[]byte` buffers +## Tricks with `[]byte` buffers The following tricks are used by fasthttp. Use them in your code too. @@ -479,7 +485,28 @@ statusCode, body, err := fasthttp.Get(nil, "http://google.com/") uintBuf := fasthttp.AppendUint(nil, 1234) ``` -# Related projects +* String and `[]byte` buffers may converted without memory allocations +```go +func b2s(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func s2b(s string) (b []byte) { + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh.Data = sh.Data + bh.Cap = sh.Len + bh.Len = sh.Len + return b +} +``` + +### Warning: +This is an **unsafe** way, the result string and `[]byte` buffer share the same bytes. + +**Please make sure not to modify the bytes in the `[]byte` buffer if the string still survives!** + +## Related projects * [fasthttp](https://github.com/fasthttp) - various useful helpers for projects based on fasthttp. @@ -505,7 +532,7 @@ uintBuf := fasthttp.AppendUint(nil, 1234) * [Gearbox](https://github.com/gogearbox/gearbox) - :gear: gearbox is a web framework written in Go with a focus on high performance and memory optimization -# FAQ +## FAQ * *Why creating yet another http package instead of optimizing net/http?* @@ -542,9 +569,10 @@ uintBuf := fasthttp.AppendUint(nil, 1234) * net/http supports [HTTP/2.0 starting from go1.6](https://http2.golang.org/). * net/http API is stable, while fasthttp API constantly evolves. * net/http handles more HTTP corner cases. + * net/http can stream both request and response bodies + * net/http can handle bigger bodies as it doesn't read the whole body into memory * net/http should contain less bugs, since it is used and tested by much wider audience. - * net/http works on Go older than 1.5. 
* *Why fasthttp API prefers returning `[]byte` instead of `string`?* @@ -555,10 +583,7 @@ uintBuf := fasthttp.AppendUint(nil, 1234) * *Which GO versions are supported by fasthttp?* - Go1.5+. Older versions won't be supported, since their standard package - [miss useful functions](https://github.com/valyala/fasthttp/issues/5). - - **NOTE**: Go 1.9.7 is the oldest tested version. We recommend you to update as soon as you can. As of 1.11.3 we will drop 1.9.x support. + Go 1.15.x. Older versions won't be supported. * *Please provide real benchmark data and server information* diff --git a/vendor/github.com/valyala/fasthttp/args.go b/vendor/github.com/valyala/fasthttp/args.go index 07600f9..a8e4394 100644 --- a/vendor/github.com/valyala/fasthttp/args.go +++ b/vendor/github.com/valyala/fasthttp/args.go @@ -110,7 +110,8 @@ func (a *Args) String() string { // QueryString returns query string for the args. // -// The returned value is valid until the next call to Args methods. +// The returned value is valid until the Args is reused or released (ReleaseArgs). +// Do not store references to the returned value. Make copies instead. func (a *Args) QueryString() []byte { a.buf = a.AppendBytes(a.buf[:0]) return a.buf @@ -241,14 +242,16 @@ func (a *Args) SetBytesKNoValue(key []byte) { // Peek returns query arg value for the given key. // -// Returned value is valid until the next Args call. +// The returned value is valid until the Args is reused or released (ReleaseArgs). +// Do not store references to the returned value. Make copies instead. func (a *Args) Peek(key string) []byte { return peekArgStr(a.args, key) } // PeekBytes returns query arg value for the given key. // -// Returned value is valid until the next Args call. +// The returned value is valid until the Args is reused or released (ReleaseArgs). +// Do not store references to the returned value. Make copies instead. func (a *Args) PeekBytes(key []byte) []byte { return peekArgBytes(a.args, key) } @@ -358,6 +361,13 @@ func visitArgs(args []argsKV, f func(k, v []byte)) { } } +func visitArgsKey(args []argsKV, f func(k []byte)) { + for i, n := 0, len(args); i < n; i++ { + kv := &args[i] + f(kv.key) + } +} + func copyArgs(dst, src []argsKV) []argsKV { if cap(dst) < len(src) { tmp := make([]argsKV, len(src)) diff --git a/vendor/github.com/valyala/fasthttp/bytesconv.go b/vendor/github.com/valyala/fasthttp/bytesconv.go index 79896e8..bf582af 100644 --- a/vendor/github.com/valyala/fasthttp/bytesconv.go +++ b/vendor/github.com/valyala/fasthttp/bytesconv.go @@ -15,7 +15,6 @@ import ( "sync" "time" "unsafe" - "runtime" ) // AppendHTMLEscape appends html-escaped s to dst and returns the extended dst. 
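The doc-comment changes in `args.go` above tighten the ownership rules: values returned by `Peek`/`QueryString` point into the `Args` internal buffer and become invalid once the `Args` is reused or released. A short usage example of the copy those comments ask for:

```go
package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	a := fasthttp.AcquireArgs()
	a.Set("q", "golang")

	// Peek returns a slice into the Args' internal buffer; copy it if it has
	// to outlive ReleaseArgs, as the updated doc comments require.
	q := append([]byte(nil), a.Peek("q")...)

	fasthttp.ReleaseArgs(a)
	fmt.Println(string(q)) // "golang", still valid because we copied
}
```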
@@ -99,7 +98,7 @@ func ParseIPv4(dst net.IP, ipStr []byte) (net.IP, error) { } v, err := ParseUint(b[:n]) if err != nil { - return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err) + return dst, fmt.Errorf("cannot parse ipStr %q: %w", ipStr, err) } if v > 255 { return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v) @@ -109,7 +108,7 @@ func ParseIPv4(dst net.IP, ipStr []byte) (net.IP, error) { } v, err := ParseUint(b) if err != nil { - return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err) + return dst, fmt.Errorf("cannot parse ipStr %q: %w", ipStr, err) } if v > 255 { return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v) @@ -348,7 +347,6 @@ func s2b(s string) (b []byte) { bh.Data = sh.Data bh.Cap = sh.Len bh.Len = sh.Len - runtime.KeepAlive(&s) return b } @@ -382,7 +380,7 @@ func appendQuotedPath(dst, src []byte) []byte { for _, c := range src { if quotedPathShouldEscapeTable[int(c)] != 0 { - dst = append(dst, '%', upperhex[c>>4], upperhex[c&15]) + dst = append(dst, '%', upperhex[c>>4], upperhex[c&0xf]) } else { dst = append(dst, c) } diff --git a/vendor/github.com/valyala/fasthttp/client.go b/vendor/github.com/valyala/fasthttp/client.go index 47bbd58..b36ca40 100644 --- a/vendor/github.com/valyala/fasthttp/client.go +++ b/vendor/github.com/valyala/fasthttp/client.go @@ -1,8 +1,9 @@ +// go:build !windows || !race + package fasthttp import ( "bufio" - "bytes" "crypto/tls" "errors" "fmt" @@ -296,6 +297,9 @@ type Client struct { // By default will use isIdempotent function RetryIf RetryIfFunc + // ConfigureClient configures the fasthttp.HostClient. + ConfigureClient func(hc *HostClient) error + mLock sync.Mutex m map[string]*HostClient ms map[string]*HostClient @@ -462,11 +466,10 @@ func (c *Client) Do(req *Request, resp *Response) error { host := uri.Host() isTLS := false - scheme := uri.Scheme() - if bytes.Equal(scheme, strHTTPS) { + if uri.isHttps() { isTLS = true - } else if !bytes.Equal(scheme, strHTTP) { - return fmt.Errorf("unsupported protocol %q. http and https are supported", scheme) + } else if !uri.isHttp() { + return fmt.Errorf("unsupported protocol %q. http and https are supported", uri.Scheme()) } startCleaner := false @@ -510,11 +513,22 @@ func (c *Client) Do(req *Request, resp *Response) error { clientReaderPool: &c.readerPool, clientWriterPool: &c.writerPool, } + + if c.ConfigureClient != nil { + if err := c.ConfigureClient(hc); err != nil { + return err + } + } + m[string(host)] = hc if len(m) == 1 { startCleaner = true } } + + atomic.AddInt32(&hc.pendingClientRequests, 1) + defer atomic.AddInt32(&hc.pendingClientRequests, -1) + c.mLock.Unlock() if startCleaner { @@ -553,12 +567,10 @@ func (c *Client) mCleaner(m map[string]*HostClient) { c.mLock.Lock() for k, v := range m { v.connsLock.Lock() - shouldRemove := v.connsCount == 0 - v.connsLock.Unlock() - - if shouldRemove { + if v.connsCount == 0 && atomic.LoadInt32(&v.pendingClientRequests) == 0 { delete(m, k) } + v.connsLock.Unlock() } if len(m) == 0 { mustStop = true @@ -783,6 +795,10 @@ type HostClient struct { pendingRequests int32 + // pendingClientRequests counts the number of requests that a Client is currently running using this HostClient. + // It will be incremented ealier than pendingRequests and will be used by Client to see if the HostClient is still in use. 
+ pendingClientRequests int32 + connsCleanerRun bool } @@ -1361,7 +1377,7 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error) req.secureErrorLogMessage = c.SecureErrorLogMessage req.Header.secureErrorLogMessage = c.SecureErrorLogMessage - if c.IsTLS != bytes.Equal(req.uri.Scheme(), strHTTPS) { + if c.IsTLS != req.URI().isHttps() { return false, ErrHostClientRedirectToDifferentScheme } diff --git a/vendor/github.com/valyala/fasthttp/cookie.go b/vendor/github.com/valyala/fasthttp/cookie.go index 7a3616c..69f7328 100644 --- a/vendor/github.com/valyala/fasthttp/cookie.go +++ b/vendor/github.com/valyala/fasthttp/cookie.go @@ -149,7 +149,8 @@ func (c *Cookie) SetPathBytes(path []byte) { // Domain returns cookie domain. // -// The returned domain is valid until the next Cookie modification method call. +// The returned value is valid until the Cookie reused or released (ReleaseCookie). +// Do not store references to the returned value. Make copies instead. func (c *Cookie) Domain() []byte { return c.domain } @@ -201,7 +202,8 @@ func (c *Cookie) SetExpire(expire time.Time) { // Value returns cookie value. // -// The returned value is valid until the next Cookie modification method call. +// The returned value is valid until the Cookie reused or released (ReleaseCookie). +// Do not store references to the returned value. Make copies instead. func (c *Cookie) Value() []byte { return c.value } @@ -218,7 +220,8 @@ func (c *Cookie) SetValueBytes(value []byte) { // Key returns cookie name. // -// The returned value is valid until the next Cookie modification method call. +// The returned value is valid until the Cookie reused or released (ReleaseCookie). +// Do not store references to the returned value. Make copies instead. func (c *Cookie) Key() []byte { return c.key } @@ -306,7 +309,8 @@ func (c *Cookie) AppendBytes(dst []byte) []byte { // Cookie returns cookie representation. // -// The returned value is valid until the next call to Cookie methods. +// The returned value is valid until the Cookie reused or released (ReleaseCookie). +// Do not store references to the returned value. Make copies instead. func (c *Cookie) Cookie() []byte { c.buf = c.AppendBytes(c.buf[:0]) return c.buf diff --git a/vendor/github.com/valyala/fasthttp/fs.go b/vendor/github.com/valyala/fasthttp/fs.go index f8d4add..72c832a 100644 --- a/vendor/github.com/valyala/fasthttp/fs.go +++ b/vendor/github.com/valyala/fasthttp/fs.go @@ -30,6 +30,10 @@ import ( // with good compression ratio. // // See also RequestCtx.SendFileBytes. +// +// WARNING: do not pass any user supplied paths to this function! +// WARNING: if path is based on user input users will be able to request +// any file on your filesystem! Use fasthttp.FS with a sane Root instead. func ServeFileBytesUncompressed(ctx *RequestCtx, path []byte) { ServeFileUncompressed(ctx, b2s(path)) } @@ -43,6 +47,10 @@ func ServeFileBytesUncompressed(ctx *RequestCtx, path []byte) { // with good compression ratio. // // See also RequestCtx.SendFile. +// +// WARNING: do not pass any user supplied paths to this function! +// WARNING: if path is based on user input users will be able to request +// any file on your filesystem! Use fasthttp.FS with a sane Root instead. func ServeFileUncompressed(ctx *RequestCtx, path string) { ctx.Request.Header.DelBytes(strAcceptEncoding) ServeFile(ctx, path) @@ -62,6 +70,10 @@ func ServeFileUncompressed(ctx *RequestCtx, path string) { // file contents. // // See also RequestCtx.SendFileBytes. 
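The cookie.go changes above reword the Domain/Value/Key/Cookie docs to tie the returned slices to the Cookie's lifetime (reuse or ReleaseCookie). A small sketch of copying a value before release; the key and value are illustrative:

```go
package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	c := fasthttp.AcquireCookie()
	c.SetKey("session")
	c.SetValue("abc123")

	// Value() points into the Cookie's internal buffers; per the updated
	// docs it is only valid until the Cookie is reused or released.
	val := append([]byte(nil), c.Value()...)

	fasthttp.ReleaseCookie(c)
	fmt.Println(string(val))
}
```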
+// +// WARNING: do not pass any user supplied paths to this function! +// WARNING: if path is based on user input users will be able to request +// any file on your filesystem! Use fasthttp.FS with a sane Root instead. func ServeFileBytes(ctx *RequestCtx, path []byte) { ServeFile(ctx, b2s(path)) } @@ -79,6 +91,10 @@ func ServeFileBytes(ctx *RequestCtx, path []byte) { // Use ServeFileUncompressed is you don't need serving compressed file contents. // // See also RequestCtx.SendFile. +// +// WARNING: do not pass any user supplied paths to this function! +// WARNING: if path is based on user input users will be able to request +// any file on your filesystem! Use fasthttp.FS with a sane Root instead. func ServeFile(ctx *RequestCtx, path string) { rootFSOnce.Do(func() { rootFSHandler = rootFS.NewRequestHandler() @@ -524,7 +540,7 @@ func (ff *fsFile) bigFileReader() (io.Reader, error) { f, err := os.Open(ff.f.Name()) if err != nil { - return nil, fmt.Errorf("cannot open already opened file: %s", err) + return nil, fmt.Errorf("cannot open already opened file: %w", err) } return &bigFileReader{ f: f, @@ -981,7 +997,7 @@ func (h *fsHandler) openIndexFile(ctx *RequestCtx, dirPath string, mustCompress return ff, nil } if !os.IsNotExist(err) { - return nil, fmt.Errorf("cannot open file %q: %s", indexFilePath, err) + return nil, fmt.Errorf("cannot open file %q: %w", indexFilePath, err) } } @@ -1100,7 +1116,7 @@ func (h *fsHandler) compressAndOpenFSFile(filePath string, fileEncoding string) fileInfo, err := f.Stat() if err != nil { f.Close() - return nil, fmt.Errorf("cannot obtain info for file %q: %s", filePath, err) + return nil, fmt.Errorf("cannot obtain info for file %q: %w", filePath, err) } if fileInfo.IsDir() { @@ -1146,7 +1162,7 @@ func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePat if err != nil { f.Close() if !os.IsPermission(err) { - return nil, fmt.Errorf("cannot create temporary file %q: %s", tmpFilePath, err) + return nil, fmt.Errorf("cannot create temporary file %q: %w", tmpFilePath, err) } return nil, errNoCreatePermission } @@ -1168,14 +1184,14 @@ func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePat zf.Close() f.Close() if err != nil { - return nil, fmt.Errorf("error when compressing file %q to %q: %s", filePath, tmpFilePath, err) + return nil, fmt.Errorf("error when compressing file %q to %q: %w", filePath, tmpFilePath, err) } if err = os.Chtimes(tmpFilePath, time.Now(), fileInfo.ModTime()); err != nil { return nil, fmt.Errorf("cannot change modification time to %s for tmp file %q: %s", fileInfo.ModTime(), tmpFilePath, err) } if err = os.Rename(tmpFilePath, compressedFilePath); err != nil { - return nil, fmt.Errorf("cannot move compressed file from %q to %q: %s", tmpFilePath, compressedFilePath, err) + return nil, fmt.Errorf("cannot move compressed file from %q to %q: %w", tmpFilePath, compressedFilePath, err) } return h.newCompressedFSFile(compressedFilePath, fileEncoding) } @@ -1183,12 +1199,12 @@ func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePat func (h *fsHandler) newCompressedFSFile(filePath string, fileEncoding string) (*fsFile, error) { f, err := os.Open(filePath) if err != nil { - return nil, fmt.Errorf("cannot open compressed file %q: %s", filePath, err) + return nil, fmt.Errorf("cannot open compressed file %q: %w", filePath, err) } fileInfo, err := f.Stat() if err != nil { f.Close() - return nil, fmt.Errorf("cannot obtain info for compressed file %q: %s", filePath, err) + return nil, 
fmt.Errorf("cannot obtain info for compressed file %q: %w", filePath, err) } return h.newFSFile(f, fileInfo, true, fileEncoding) } @@ -1210,7 +1226,7 @@ func (h *fsHandler) openFSFile(filePath string, mustCompress bool, fileEncoding fileInfo, err := f.Stat() if err != nil { f.Close() - return nil, fmt.Errorf("cannot obtain info for file %q: %s", filePath, err) + return nil, fmt.Errorf("cannot obtain info for file %q: %w", filePath, err) } if fileInfo.IsDir() { @@ -1226,7 +1242,7 @@ func (h *fsHandler) openFSFile(filePath string, mustCompress bool, fileEncoding fileInfoOriginal, err := os.Stat(filePathOriginal) if err != nil { f.Close() - return nil, fmt.Errorf("cannot obtain info for original file %q: %s", filePathOriginal, err) + return nil, fmt.Errorf("cannot obtain info for original file %q: %w", filePathOriginal, err) } // Only re-create the compressed file if there was more than a second between the mod times. @@ -1257,7 +1273,7 @@ func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool, if len(contentType) == 0 { data, err := readFileHeader(f, compressed, fileEncoding) if err != nil { - return nil, fmt.Errorf("cannot read header of the file %q: %s", f.Name(), err) + return nil, fmt.Errorf("cannot read header of the file %q: %w", f.Name(), err) } contentType = http.DetectContentType(data) } @@ -1370,18 +1386,10 @@ func fsModTime(t time.Time) time.Time { return t.In(time.UTC).Truncate(time.Second) } -var ( - filesLockMap = make(map[string]*sync.Mutex) - filesLockMapLock sync.Mutex -) +var filesLockMap sync.Map func getFileLock(absPath string) *sync.Mutex { - filesLockMapLock.Lock() - flock := filesLockMap[absPath] - if flock == nil { - flock = &sync.Mutex{} - filesLockMap[absPath] = flock - } - filesLockMapLock.Unlock() - return flock + v, _ := filesLockMap.LoadOrStore(absPath, &sync.Mutex{}) + filelock := v.(*sync.Mutex) + return filelock } diff --git a/vendor/github.com/valyala/fasthttp/go.mod b/vendor/github.com/valyala/fasthttp/go.mod index 1711091..f5cfe1b 100644 --- a/vendor/github.com/valyala/fasthttp/go.mod +++ b/vendor/github.com/valyala/fasthttp/go.mod @@ -3,11 +3,11 @@ module github.com/valyala/fasthttp go 1.12 require ( - github.com/andybalholm/brotli v1.0.2 - github.com/klauspost/compress v1.13.4 + github.com/andybalholm/brotli v1.0.4 + github.com/klauspost/compress v1.15.0 github.com/valyala/bytebufferpool v1.0.0 github.com/valyala/tcplisten v1.0.0 - golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a - golang.org/x/net v0.0.0-20210510120150-4163338589ed - golang.org/x/sys v0.0.0-20210514084401-e8d321eab015 + golang.org/x/crypto v0.0.0-20220214200702-86341886e292 + golang.org/x/net v0.0.0-20220225172249-27dd8689420f + golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9 ) diff --git a/vendor/github.com/valyala/fasthttp/go.sum b/vendor/github.com/valyala/fasthttp/go.sum index 5c59aef..8595e94 100644 --- a/vendor/github.com/valyala/fasthttp/go.sum +++ b/vendor/github.com/valyala/fasthttp/go.sum @@ -1,23 +1,26 @@ -github.com/andybalholm/brotli v1.0.2 h1:JKnhI/XQ75uFBTiuzXpzFrUriDPiZjlOSzh6wXogP0E= -github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s= -github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= 
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U= +github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210510120150-4163338589ed h1:p9UgmWI9wKpfYmgaV/IZKGdXc5qEK45tDwwwDyjS26I= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015 h1:hZR0X1kPW+nwyJ9xRxqZk1vx5RUObAPBdKVvXPDUH/E= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9 h1:nhht2DYV/Sn3qOayu8lM+cU1ii9sTLUeBQwQQfUHtrs= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/valyala/fasthttp/header.go b/vendor/github.com/valyala/fasthttp/header.go index 73e9844..7b9df08 100644 --- a/vendor/github.com/valyala/fasthttp/header.go +++ b/vendor/github.com/valyala/fasthttp/header.go @@ -33,6 +33,8 @@ type ResponseHeader struct { 
noDefaultDate bool statusCode int + statusMessage []byte + protocol []byte contentLength int contentLengthBytes []byte secureErrorLogMessage bool @@ -40,8 +42,9 @@ type ResponseHeader struct { contentType []byte server []byte - h []argsKV - bufKV argsKV + h []argsKV + trailer []argsKV + bufKV argsKV cookies []argsKV } @@ -56,9 +59,10 @@ type ResponseHeader struct { type RequestHeader struct { noCopy noCopy //nolint:unused,structcheck - disableNormalizing bool - noHTTP11 bool - connectionClose bool + disableNormalizing bool + noHTTP11 bool + connectionClose bool + noDefaultContentType bool // These two fields have been moved close to other bool fields // for reducing RequestHeader object size. @@ -75,8 +79,9 @@ type RequestHeader struct { contentType []byte userAgent []byte - h []argsKV - bufKV argsKV + h []argsKV + trailer []argsKV + bufKV argsKV cookies []argsKV @@ -136,6 +141,29 @@ func (h *ResponseHeader) SetStatusCode(statusCode int) { h.statusCode = statusCode } +// StatusMessage returns response status message. +func (h *ResponseHeader) StatusMessage() []byte { + return h.statusMessage +} + +// SetStatusMessage sets response status message bytes. +func (h *ResponseHeader) SetStatusMessage(statusMessage []byte) { + h.statusMessage = append(h.statusMessage[:0], statusMessage...) +} + +// Protocol returns response protocol bytes. +func (h *ResponseHeader) Protocol() []byte { + if len(h.protocol) > 0 { + return h.protocol + } + return strHTTP11 +} + +// SetProtocol sets response protocol bytes. +func (h *ResponseHeader) SetProtocol(protocol []byte) { + h.protocol = append(h.protocol[:0], protocol...) +} + // SetLastModified sets 'Last-Modified' header to the given value. func (h *ResponseHeader) SetLastModified(t time.Time) { h.bufKV.value = AppendHTTPDate(h.bufKV.value[:0], t) @@ -271,11 +299,11 @@ func (h *RequestHeader) SetContentLength(contentLength int) { func (h *ResponseHeader) isCompressibleContentType() bool { contentType := h.ContentType() return bytes.HasPrefix(contentType, strTextSlash) || - bytes.HasPrefix(contentType, strApplicationSlash) || - bytes.HasPrefix(contentType, strImageSVG) || - bytes.HasPrefix(contentType, strImageIcon) || - bytes.HasPrefix(contentType, strFontSlash) || - bytes.HasPrefix(contentType, strMultipartSlash) + bytes.HasPrefix(contentType, strApplicationSlash) || + bytes.HasPrefix(contentType, strImageSVG) || + bytes.HasPrefix(contentType, strImageIcon) || + bytes.HasPrefix(contentType, strFontSlash) || + bytes.HasPrefix(contentType, strMultipartSlash) } // ContentType returns Content-Type header value. @@ -357,6 +385,117 @@ func (h *RequestHeader) SetMultipartFormBoundaryBytes(boundary []byte) { h.SetContentTypeBytes(h.bufKV.value) } +// SetTrailer sets header Trailer value for chunked response +// to indicate which headers will be sent after the body. +// +// Use Set to set the trailer header later. +// +// Trailers are only supported with chunked transfer. +// Trailers allow the sender to include additional headers at the end of chunked messages. +// +// The following trailers are forbidden: +// 1. necessary for message framing (e.g., Transfer-Encoding and Content-Length), +// 2. routing (e.g., Host), +// 3. request modifiers (e.g., controls and conditionals in Section 5 of [RFC7231]), +// 4. authentication (e.g., see [RFC7235] and [RFC6265]), +// 5. response control data (e.g., see Section 7.1 of [RFC7231]), +// 6. 
determining how to process the payload (e.g., Content-Encoding, Content-Type, Content-Range, and Trailer) +// +// Return ErrBadTrailer if contain any forbidden trailers. +func (h *ResponseHeader) SetTrailer(trailer string) error { + return h.SetTrailerBytes(s2b(trailer)) +} + +// SetTrailerBytes sets Trailer header value for chunked response +// to indicate which headers will be sent after the body. +// +// Use Set to set the trailer header later. +// +// Trailers are only supported with chunked transfer. +// Trailers allow the sender to include additional headers at the end of chunked messages. +// +// The following trailers are forbidden: +// 1. necessary for message framing (e.g., Transfer-Encoding and Content-Length), +// 2. routing (e.g., Host), +// 3. request modifiers (e.g., controls and conditionals in Section 5 of [RFC7231]), +// 4. authentication (e.g., see [RFC7235] and [RFC6265]), +// 5. response control data (e.g., see Section 7.1 of [RFC7231]), +// 6. determining how to process the payload (e.g., Content-Encoding, Content-Type, Content-Range, and Trailer) +// +// Return ErrBadTrailer if contain any forbidden trailers. +func (h *ResponseHeader) SetTrailerBytes(trailer []byte) error { + h.trailer = h.trailer[:0] + return h.AddTrailerBytes(trailer) +} + +// AddTrailer add Trailer header value for chunked response +// to indicate which headers will be sent after the body. +// +// Use Set to set the trailer header later. +// +// Trailers are only supported with chunked transfer. +// Trailers allow the sender to include additional headers at the end of chunked messages. +// +// The following trailers are forbidden: +// 1. necessary for message framing (e.g., Transfer-Encoding and Content-Length), +// 2. routing (e.g., Host), +// 3. request modifiers (e.g., controls and conditionals in Section 5 of [RFC7231]), +// 4. authentication (e.g., see [RFC7235] and [RFC6265]), +// 5. response control data (e.g., see Section 7.1 of [RFC7231]), +// 6. determining how to process the payload (e.g., Content-Encoding, Content-Type, Content-Range, and Trailer) +// +// Return ErrBadTrailer if contain any forbidden trailers. +func (h *ResponseHeader) AddTrailer(trailer string) error { + return h.AddTrailerBytes(s2b(trailer)) +} + +var ErrBadTrailer = errors.New("contain forbidden trailer") + +// AddTrailerBytes add Trailer header value for chunked response +// to indicate which headers will be sent after the body. +// +// Use Set to set the trailer header later. +// +// Trailers are only supported with chunked transfer. +// Trailers allow the sender to include additional headers at the end of chunked messages. +// +// The following trailers are forbidden: +// 1. necessary for message framing (e.g., Transfer-Encoding and Content-Length), +// 2. routing (e.g., Host), +// 3. request modifiers (e.g., controls and conditionals in Section 5 of [RFC7231]), +// 4. authentication (e.g., see [RFC7235] and [RFC6265]), +// 5. response control data (e.g., see Section 7.1 of [RFC7231]), +// 6. determining how to process the payload (e.g., Content-Encoding, Content-Type, Content-Range, and Trailer) +// +// Return ErrBadTrailer if contain any forbidden trailers. 
+func (h *ResponseHeader) AddTrailerBytes(trailer []byte) error { + var err error + for i := -1; i+1 < len(trailer); { + trailer = trailer[i+1:] + i = bytes.IndexByte(trailer, ',') + if i < 0 { + i = len(trailer) + } + key := trailer[:i] + for len(key) > 0 && key[0] == ' ' { + key = key[1:] + } + for len(key) > 0 && key[len(key)-1] == ' ' { + key = key[:len(key)-1] + } + // Forbidden by RFC 7230, section 4.1.2 + if isBadTrailer(key) { + err = ErrBadTrailer + continue + } + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.trailer = appendArgBytes(h.trailer, h.bufKV.key, nil, argsNoValue) + } + + return err +} + // MultipartFormBoundary returns boundary part // from 'multipart/form-data; boundary=...' Content-Type. func (h *RequestHeader) MultipartFormBoundary() []byte { @@ -505,6 +644,115 @@ func (h *RequestHeader) SetRequestURIBytes(requestURI []byte) { h.requestURI = append(h.requestURI[:0], requestURI...) } +// SetTrailer sets Trailer header value for chunked request +// to indicate which headers will be sent after the body. +// +// Use Set to set the trailer header later. +// +// Trailers are only supported with chunked transfer. +// Trailers allow the sender to include additional headers at the end of chunked messages. +// +// The following trailers are forbidden: +// 1. necessary for message framing (e.g., Transfer-Encoding and Content-Length), +// 2. routing (e.g., Host), +// 3. request modifiers (e.g., controls and conditionals in Section 5 of [RFC7231]), +// 4. authentication (e.g., see [RFC7235] and [RFC6265]), +// 5. response control data (e.g., see Section 7.1 of [RFC7231]), +// 6. determining how to process the payload (e.g., Content-Encoding, Content-Type, Content-Range, and Trailer) +// +// Return ErrBadTrailer if contain any forbidden trailers. +func (h *RequestHeader) SetTrailer(trailer string) error { + return h.SetTrailerBytes(s2b(trailer)) +} + +// SetTrailerBytes sets Trailer header value for chunked request +// to indicate which headers will be sent after the body. +// +// Use Set to set the trailer header later. +// +// Trailers are only supported with chunked transfer. +// Trailers allow the sender to include additional headers at the end of chunked messages. +// +// The following trailers are forbidden: +// 1. necessary for message framing (e.g., Transfer-Encoding and Content-Length), +// 2. routing (e.g., Host), +// 3. request modifiers (e.g., controls and conditionals in Section 5 of [RFC7231]), +// 4. authentication (e.g., see [RFC7235] and [RFC6265]), +// 5. response control data (e.g., see Section 7.1 of [RFC7231]), +// 6. determining how to process the payload (e.g., Content-Encoding, Content-Type, Content-Range, and Trailer) +// +// Return ErrBadTrailer if contain any forbidden trailers. +func (h *RequestHeader) SetTrailerBytes(trailer []byte) error { + h.trailer = h.trailer[:0] + return h.AddTrailerBytes(trailer) +} + +// AddTrailer add Trailer header value for chunked request +// to indicate which headers will be sent after the body. +// +// Use Set to set the trailer header later. +// +// Trailers are only supported with chunked transfer. +// Trailers allow the sender to include additional headers at the end of chunked messages. +// +// The following trailers are forbidden: +// 1. necessary for message framing (e.g., Transfer-Encoding and Content-Length), +// 2. routing (e.g., Host), +// 3. request modifiers (e.g., controls and conditionals in Section 5 of [RFC7231]), +// 4. 
authentication (e.g., see [RFC7235] and [RFC6265]), +// 5. response control data (e.g., see Section 7.1 of [RFC7231]), +// 6. determining how to process the payload (e.g., Content-Encoding, Content-Type, Content-Range, and Trailer) +// +// Return ErrBadTrailer if contain any forbidden trailers. +func (h *RequestHeader) AddTrailer(trailer string) error { + return h.AddTrailerBytes(s2b(trailer)) +} + +// AddTrailerBytes add Trailer header value for chunked request +// to indicate which headers will be sent after the body. +// +// Use Set to set the trailer header later. +// +// Trailers are only supported with chunked transfer. +// Trailers allow the sender to include additional headers at the end of chunked messages. +// +// The following trailers are forbidden: +// 1. necessary for message framing (e.g., Transfer-Encoding and Content-Length), +// 2. routing (e.g., Host), +// 3. request modifiers (e.g., controls and conditionals in Section 5 of [RFC7231]), +// 4. authentication (e.g., see [RFC7235] and [RFC6265]), +// 5. response control data (e.g., see Section 7.1 of [RFC7231]), +// 6. determining how to process the payload (e.g., Content-Encoding, Content-Type, Content-Range, and Trailer) +// +// Return ErrBadTrailer if contain any forbidden trailers. +func (h *RequestHeader) AddTrailerBytes(trailer []byte) error { + var err error + for i := -1; i+1 < len(trailer); { + trailer = trailer[i+1:] + i = bytes.IndexByte(trailer, ',') + if i < 0 { + i = len(trailer) + } + key := trailer[:i] + for len(key) > 0 && key[0] == ' ' { + key = key[1:] + } + for len(key) > 0 && key[len(key)-1] == ' ' { + key = key[:len(key)-1] + } + // Forbidden by RFC 7230, section 4.1.2 + if isBadTrailer(key) { + err = ErrBadTrailer + continue + } + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.trailer = appendArgBytes(h.trailer, h.bufKV.key, nil, argsNoValue) + } + + return err +} + // IsGet returns true if request method is GET. func (h *RequestHeader) IsGet() bool { return string(h.Method()) == MethodGet @@ -683,6 +931,8 @@ func (h *ResponseHeader) resetSkipNormalize() { h.connectionClose = false h.statusCode = 0 + h.statusMessage = h.statusMessage[:0] + h.protocol = h.protocol[:0] h.contentLength = 0 h.contentLengthBytes = h.contentLengthBytes[:0] @@ -691,11 +941,18 @@ func (h *ResponseHeader) resetSkipNormalize() { h.h = h.h[:0] h.cookies = h.cookies[:0] + h.trailer = h.trailer[:0] +} + +// SetNoDefaultContentType allows you to control if a default Content-Type header will be set (false) or not (true). +func (h *RequestHeader) SetNoDefaultContentType(noDefaultContentType bool) { + h.noDefaultContentType = noDefaultContentType } // Reset clears request header. func (h *RequestHeader) Reset() { h.disableNormalizing = false + h.SetNoDefaultContentType(false) h.resetSkipNormalize() } @@ -712,6 +969,7 @@ func (h *RequestHeader) resetSkipNormalize() { h.host = h.host[:0] h.contentType = h.contentType[:0] h.userAgent = h.userAgent[:0] + h.trailer = h.trailer[:0] h.h = h.h[:0] h.cookies = h.cookies[:0] @@ -731,12 +989,15 @@ func (h *ResponseHeader) CopyTo(dst *ResponseHeader) { dst.noDefaultDate = h.noDefaultDate dst.statusCode = h.statusCode + dst.statusMessage = append(dst.statusMessage, h.statusMessage...) + dst.protocol = append(dst.protocol, h.protocol...) dst.contentLength = h.contentLength - dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...) - dst.contentType = append(dst.contentType[:0], h.contentType...) 
- dst.server = append(dst.server[:0], h.server...) + dst.contentLengthBytes = append(dst.contentLengthBytes, h.contentLengthBytes...) + dst.contentType = append(dst.contentType, h.contentType...) + dst.server = append(dst.server, h.server...) dst.h = copyArgs(dst.h, h.h) dst.cookies = copyArgs(dst.cookies, h.cookies) + dst.trailer = copyArgs(dst.trailer, h.trailer) } // CopyTo copies all the headers to dst. @@ -748,17 +1009,18 @@ func (h *RequestHeader) CopyTo(dst *RequestHeader) { dst.connectionClose = h.connectionClose dst.contentLength = h.contentLength - dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...) - dst.method = append(dst.method[:0], h.method...) - dst.proto = append(dst.proto[:0], h.proto...) - dst.requestURI = append(dst.requestURI[:0], h.requestURI...) - dst.host = append(dst.host[:0], h.host...) - dst.contentType = append(dst.contentType[:0], h.contentType...) - dst.userAgent = append(dst.userAgent[:0], h.userAgent...) + dst.contentLengthBytes = append(dst.contentLengthBytes, h.contentLengthBytes...) + dst.method = append(dst.method, h.method...) + dst.proto = append(dst.proto, h.proto...) + dst.requestURI = append(dst.requestURI, h.requestURI...) + dst.host = append(dst.host, h.host...) + dst.contentType = append(dst.contentType, h.contentType...) + dst.userAgent = append(dst.userAgent, h.userAgent...) + dst.trailer = append(dst.trailer, h.trailer...) dst.h = copyArgs(dst.h, h.h) dst.cookies = copyArgs(dst.cookies, h.cookies) dst.cookiesCollected = h.cookiesCollected - dst.rawHeaders = append(dst.rawHeaders[:0], h.rawHeaders...) + dst.rawHeaders = append(dst.rawHeaders, h.rawHeaders...) } // VisitAll calls f for each header. @@ -782,12 +1044,29 @@ func (h *ResponseHeader) VisitAll(f func(key, value []byte)) { f(strSetCookie, v) }) } + if len(h.trailer) > 0 { + f(strTrailer, appendArgsKeyBytes(nil, h.trailer, strCommaSpace)) + } visitArgs(h.h, f) if h.ConnectionClose() { f(strConnection, strClose) } } +// VisitAllTrailer calls f for each response Trailer. +// +// f must not retain references to value after returning. +func (h *ResponseHeader) VisitAllTrailer(f func(value []byte)) { + visitArgsKey(h.trailer, f) +} + +// VisitAllTrailer calls f for each request Trailer. +// +// f must not retain references to value after returning. +func (h *RequestHeader) VisitAllTrailer(f func(value []byte)) { + visitArgsKey(h.trailer, f) +} + // VisitAllCookie calls f for each response cookie. // // Cookie name is passed in key and the whole Set-Cookie header value @@ -829,6 +1108,9 @@ func (h *RequestHeader) VisitAll(f func(key, value []byte)) { if len(userAgent) > 0 { f(strUserAgent, userAgent) } + if len(h.trailer) > 0 { + f(strTrailer, appendArgsKeyBytes(nil, h.trailer, strCommaSpace)) + } h.collectCookies() if len(h.cookies) > 0 { @@ -885,6 +1167,8 @@ func (h *ResponseHeader) del(key []byte) { h.contentLengthBytes = h.contentLengthBytes[:0] case HeaderConnection: h.connectionClose = false + case HeaderTrailer: + h.trailer = h.trailer[:0] } h.h = delAllArgsBytes(h.h, key) } @@ -917,6 +1201,8 @@ func (h *RequestHeader) del(key []byte) { h.contentLengthBytes = h.contentLengthBytes[:0] case HeaderConnection: h.connectionClose = false + case HeaderTrailer: + h.trailer = h.trailer[:0] } h.h = delAllArgsBytes(h.h, key) } @@ -962,6 +1248,9 @@ func (h *ResponseHeader) setSpecialHeader(key, value []byte) bool { if caseInsensitiveCompare(strTransferEncoding, key) { // Transfer-Encoding is managed automatically. 
return true + } else if caseInsensitiveCompare(strTrailer, key) { + _ = h.SetTrailerBytes(value) + return true } case 'd': if caseInsensitiveCompare(strDate, key) { @@ -1007,6 +1296,9 @@ func (h *RequestHeader) setSpecialHeader(key, value []byte) bool { if caseInsensitiveCompare(strTransferEncoding, key) { // Transfer-Encoding is managed automatically. return true + } else if caseInsensitiveCompare(strTrailer, key) { + _ = h.SetTrailerBytes(value) + return true } case 'h': if caseInsensitiveCompare(strHost, key) { @@ -1031,6 +1323,9 @@ func (h *RequestHeader) setSpecialHeader(key, value []byte) bool { // the Content-Type, Content-Length, Connection, Server, Set-Cookie, // Transfer-Encoding and Date headers can only be set once and will // overwrite the previous value. +// +// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details), +// it will be sent after the chunked response body. func (h *ResponseHeader) Add(key, value string) { h.AddBytesKV(s2b(key), s2b(value)) } @@ -1043,6 +1338,9 @@ func (h *ResponseHeader) Add(key, value string) { // the Content-Type, Content-Length, Connection, Server, Set-Cookie, // Transfer-Encoding and Date headers can only be set once and will // overwrite the previous value. +// +// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details), +// it will be sent after the chunked response body. func (h *ResponseHeader) AddBytesK(key []byte, value string) { h.AddBytesKV(key, s2b(value)) } @@ -1055,6 +1353,9 @@ func (h *ResponseHeader) AddBytesK(key []byte, value string) { // the Content-Type, Content-Length, Connection, Server, Set-Cookie, // Transfer-Encoding and Date headers can only be set once and will // overwrite the previous value. +// +// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details), +// it will be sent after the chunked response body. func (h *ResponseHeader) AddBytesV(key string, value []byte) { h.AddBytesKV(s2b(key), value) } @@ -1067,6 +1368,9 @@ func (h *ResponseHeader) AddBytesV(key string, value []byte) { // the Content-Type, Content-Length, Connection, Server, Set-Cookie, // Transfer-Encoding and Date headers can only be set once and will // overwrite the previous value. +// +// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details), +// it will be sent after the chunked response body. func (h *ResponseHeader) AddBytesKV(key, value []byte) { if h.setSpecialHeader(key, value) { return @@ -1078,6 +1382,9 @@ func (h *ResponseHeader) AddBytesKV(key, value []byte) { // Set sets the given 'key: value' header. // +// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details), +// it will be sent after the chunked response body. +// // Use Add for setting multiple header values under the same key. func (h *ResponseHeader) Set(key, value string) { initHeaderKV(&h.bufKV, key, value, h.disableNormalizing) @@ -1086,6 +1393,9 @@ func (h *ResponseHeader) Set(key, value string) { // SetBytesK sets the given 'key: value' header. // +// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details), +// it will be sent after the chunked response body. +// // Use AddBytesK for setting multiple header values under the same key. func (h *ResponseHeader) SetBytesK(key []byte, value string) { h.bufKV.value = append(h.bufKV.value[:0], value...) 
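The header.go hunks in this file add trailer support: SetTrailer/AddTrailer declare trailer keys (forbidden keys yield ErrBadTrailer), and a later Set on a declared key is emitted after the chunked body instead of with the headers. A handler sketch based on the doc comments above; the trailer name, checksum, and listen address are illustrative:

```go
package main

import (
	"bytes"
	"log"

	"github.com/valyala/fasthttp"
)

func handler(ctx *fasthttp.RequestCtx) {
	h := &ctx.Response.Header

	// Announce the trailer; framing/routing headers such as
	// Content-Length or Host are rejected with ErrBadTrailer.
	if err := h.SetTrailer("X-Checksum"); err != nil {
		ctx.Error(err.Error(), fasthttp.StatusInternalServerError)
		return
	}

	// A streamed body with unknown size (-1) is sent chunked, which is
	// the only transfer mode that carries trailers.
	ctx.Response.SetBodyStream(bytes.NewReader([]byte("hello")), -1)

	// Because X-Checksum was declared as a trailer, this value is written
	// after the last chunk rather than in the header block.
	h.Set("X-Checksum", "5d41402abc4b2a76b9719d911017c592") // md5 of "hello", precomputed for brevity
}

func main() {
	log.Fatal(fasthttp.ListenAndServe(":8080", handler))
}
```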
@@ -1094,6 +1404,9 @@ func (h *ResponseHeader) SetBytesK(key []byte, value string) { // SetBytesV sets the given 'key: value' header. // +// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details), +// it will be sent after the chunked response body. +// // Use AddBytesV for setting multiple header values under the same key. func (h *ResponseHeader) SetBytesV(key string, value []byte) { k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) @@ -1102,6 +1415,9 @@ func (h *ResponseHeader) SetBytesV(key string, value []byte) { // SetBytesKV sets the given 'key: value' header. // +// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details), +// it will be sent after the chunked response body. +// // Use AddBytesKV for setting multiple header values under the same key. func (h *ResponseHeader) SetBytesKV(key, value []byte) { h.bufKV.key = append(h.bufKV.key[:0], key...) @@ -1111,6 +1427,9 @@ func (h *ResponseHeader) SetBytesKV(key, value []byte) { // SetCanonical sets the given 'key: value' header assuming that // key is in canonical form. +// +// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details), +// it will be sent after the chunked response body. func (h *ResponseHeader) SetCanonical(key, value []byte) { if h.setSpecialHeader(key, value) { return @@ -1224,6 +1543,9 @@ func (h *RequestHeader) DelAllCookies() { // // Multiple headers with the same key may be added with this function. // Use Set for setting a single header for the given key. +// +// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details), +// it will be sent after the chunked request body. func (h *RequestHeader) Add(key, value string) { h.AddBytesKV(s2b(key), s2b(value)) } @@ -1232,6 +1554,9 @@ func (h *RequestHeader) Add(key, value string) { // // Multiple headers with the same key may be added with this function. // Use SetBytesK for setting a single header for the given key. +// +// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details), +// it will be sent after the chunked request body. func (h *RequestHeader) AddBytesK(key []byte, value string) { h.AddBytesKV(key, s2b(value)) } @@ -1240,6 +1565,9 @@ func (h *RequestHeader) AddBytesK(key []byte, value string) { // // Multiple headers with the same key may be added with this function. // Use SetBytesV for setting a single header for the given key. +// +// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details), +// it will be sent after the chunked request body. func (h *RequestHeader) AddBytesV(key string, value []byte) { h.AddBytesKV(s2b(key), value) } @@ -1252,6 +1580,9 @@ func (h *RequestHeader) AddBytesV(key string, value []byte) { // the Content-Type, Content-Length, Connection, Cookie, // Transfer-Encoding, Host and User-Agent headers can only be set once // and will overwrite the previous value. +// +// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details), +// it will be sent after the chunked request body. func (h *RequestHeader) AddBytesKV(key, value []byte) { if h.setSpecialHeader(key, value) { return @@ -1263,6 +1594,9 @@ func (h *RequestHeader) AddBytesKV(key, value []byte) { // Set sets the given 'key: value' header. 
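RequestHeader gains the mirror-image trailer API for chunked request bodies. A client-side sketch along the same lines as the server example above, assuming a streamed body with size -1; the URL, header name, and checksum value are stand-ins:

```go
package main

import (
	"log"
	"strings"

	"github.com/valyala/fasthttp"
)

func main() {
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)

	req.SetRequestURI("http://example.com/upload") // placeholder URL
	req.Header.SetMethod(fasthttp.MethodPost)

	// Declare the trailer, then stream the body with unknown size so the
	// request goes out chunked; the trailer follows the last chunk.
	if err := req.Header.SetTrailer("X-Upload-Checksum"); err != nil {
		log.Fatal(err)
	}
	req.SetBodyStream(strings.NewReader("payload"), -1)
	req.Header.Set("X-Upload-Checksum", "0123456789abcdef") // illustrative value

	if err := fasthttp.Do(req, resp); err != nil {
		log.Fatal(err)
	}
	log.Printf("status: %d", resp.StatusCode())
}
```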
// +// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details), +// it will be sent after the chunked request body. +// // Use Add for setting multiple header values under the same key. func (h *RequestHeader) Set(key, value string) { initHeaderKV(&h.bufKV, key, value, h.disableNormalizing) @@ -1271,6 +1605,9 @@ func (h *RequestHeader) Set(key, value string) { // SetBytesK sets the given 'key: value' header. // +// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details), +// it will be sent after the chunked request body. +// // Use AddBytesK for setting multiple header values under the same key. func (h *RequestHeader) SetBytesK(key []byte, value string) { h.bufKV.value = append(h.bufKV.value[:0], value...) @@ -1279,6 +1616,9 @@ func (h *RequestHeader) SetBytesK(key []byte, value string) { // SetBytesV sets the given 'key: value' header. // +// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details), +// it will be sent after the chunked request body. +// // Use AddBytesV for setting multiple header values under the same key. func (h *RequestHeader) SetBytesV(key string, value []byte) { k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) @@ -1287,6 +1627,9 @@ func (h *RequestHeader) SetBytesV(key string, value []byte) { // SetBytesKV sets the given 'key: value' header. // +// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details), +// it will be sent after the chunked request body. +// // Use AddBytesKV for setting multiple header values under the same key. func (h *RequestHeader) SetBytesKV(key, value []byte) { h.bufKV.key = append(h.bufKV.key[:0], key...) @@ -1296,6 +1639,9 @@ func (h *RequestHeader) SetBytesKV(key, value []byte) { // SetCanonical sets the given 'key: value' header assuming that // key is in canonical form. +// +// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details), +// it will be sent after the chunked request body. func (h *RequestHeader) SetCanonical(key, value []byte) { if h.setSpecialHeader(key, value) { return @@ -1306,8 +1652,9 @@ func (h *RequestHeader) SetCanonical(key, value []byte) { // Peek returns header value for the given key. // -// Returned value is valid until the next call to ResponseHeader. -// Do not store references to returned value. Make copies instead. +// The returned value is valid until the response is released, +// either though ReleaseResponse or your request handler returning. +// Do not store references to the returned value. Make copies instead. func (h *ResponseHeader) Peek(key string) []byte { k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) return h.peek(k) @@ -1315,7 +1662,8 @@ func (h *ResponseHeader) Peek(key string) []byte { // PeekBytes returns header value for the given key. // -// Returned value is valid until the next call to ResponseHeader. +// The returned value is valid until the response is released, +// either though ReleaseResponse or your request handler returning. // Do not store references to returned value. Make copies instead. func (h *ResponseHeader) PeekBytes(key []byte) []byte { h.bufKV.key = append(h.bufKV.key[:0], key...) @@ -1325,7 +1673,8 @@ func (h *ResponseHeader) PeekBytes(key []byte) []byte { // Peek returns header value for the given key. // -// Returned value is valid until the next call to RequestHeader. 
+// The returned value is valid until the request is released, +// either though ReleaseRequest or your request handler returning. // Do not store references to returned value. Make copies instead. func (h *RequestHeader) Peek(key string) []byte { k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) @@ -1334,7 +1683,8 @@ func (h *RequestHeader) Peek(key string) []byte { // PeekBytes returns header value for the given key. // -// Returned value is valid until the next call to RequestHeader. +// The returned value is valid until the request is released, +// either though ReleaseRequest or your request handler returning. // Do not store references to returned value. Make copies instead. func (h *RequestHeader) PeekBytes(key []byte) []byte { h.bufKV.key = append(h.bufKV.key[:0], key...) @@ -1357,6 +1707,8 @@ func (h *ResponseHeader) peek(key []byte) []byte { return h.contentLengthBytes case HeaderSetCookie: return appendResponseCookieBytes(nil, h.cookies) + case HeaderTrailer: + return appendArgsKeyBytes(nil, h.trailer, strCommaSpace) default: return peekArgBytes(h.h, key) } @@ -1382,6 +1734,8 @@ func (h *RequestHeader) peek(key []byte) []byte { return appendRequestCookieBytes(nil, h.cookies) } return peekArgBytes(h.h, key) + case HeaderTrailer: + return appendArgsKeyBytes(nil, h.trailer, strCommaSpace) default: return peekArgBytes(h.h, key) } @@ -1450,11 +1804,11 @@ func (h *ResponseHeader) tryRead(r *bufio.Reader, n int) error { } } return &ErrSmallBuffer{ - error: fmt.Errorf("error when reading response headers: %s", errSmallBuffer), + error: fmt.Errorf("error when reading response headers: %w", errSmallBuffer), } } - return fmt.Errorf("error when reading response headers: %s", err) + return fmt.Errorf("error when reading response headers: %w", err) } b = mustPeekBuffered(r) headersLen, errParse := h.parse(b) @@ -1465,6 +1819,61 @@ func (h *ResponseHeader) tryRead(r *bufio.Reader, n int) error { return nil } +// ReadTrailer reads response trailer header from r. +// +// io.EOF is returned if r is closed before reading the first byte. +func (h *ResponseHeader) ReadTrailer(r *bufio.Reader) error { + n := 1 + for { + err := h.tryReadTrailer(r, n) + if err == nil { + return nil + } + if err != errNeedMore { + return err + } + n = r.Buffered() + 1 + } +} + +func (h *ResponseHeader) tryReadTrailer(r *bufio.Reader, n int) error { + b, err := r.Peek(n) + if len(b) == 0 { + // Return ErrTimeout on any timeout. + if x, ok := err.(interface{ Timeout() bool }); ok && x.Timeout() { + return ErrTimeout + } + + if n == 1 || err == io.EOF { + return io.EOF + } + + // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 . 
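The Peek/PeekBytes doc updates above anchor the returned slice to the request/response lifetime rather than "the next call". A short handler sketch of copying a peeked header before it escapes the handler; the header name and audit helper are hypothetical:

```go
package main

import (
	"log"

	"github.com/valyala/fasthttp"
)

func handler(ctx *fasthttp.RequestCtx) {
	// Peek returns a slice tied to the request's lifetime; once the
	// handler returns, the underlying buffers may be reused.
	token := ctx.Request.Header.Peek("X-Auth-Token")

	// Copy before handing the value to anything that outlives the handler.
	saved := append([]byte(nil), token...)
	go audit(saved) // hypothetical async consumer

	ctx.SetStatusCode(fasthttp.StatusNoContent)
}

func audit(token []byte) {
	log.Printf("audit token of %d bytes", len(token))
}

func main() {
	log.Fatal(fasthttp.ListenAndServe(":8080", handler))
}
```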
+ if err == bufio.ErrBufferFull { + if h.secureErrorLogMessage { + return &ErrSmallBuffer{ + error: fmt.Errorf("error when reading response trailer"), + } + } + return &ErrSmallBuffer{ + error: fmt.Errorf("error when reading response trailer: %w", errSmallBuffer), + } + } + + return fmt.Errorf("error when reading response trailer: %w", err) + } + b = mustPeekBuffered(r) + headersLen, errParse := h.parseTrailer(b) + if errParse != nil { + if err == io.EOF { + return err + } + return headerError("response", err, errParse, b, h.secureErrorLogMessage) + } + mustDiscard(r, headersLen) + return nil +} + func headerError(typ string, err, errParse error, b []byte, secureErrorLogMessage bool) error { if errParse != errNeedMore { return headerErrorMsg(typ, errParse, b, secureErrorLogMessage) @@ -1489,9 +1898,9 @@ func headerError(typ string, err, errParse error, b []byte, secureErrorLogMessag func headerErrorMsg(typ string, err error, b []byte, secureErrorLogMessage bool) error { if secureErrorLogMessage { - return fmt.Errorf("error when reading %s headers: %s. Buffer size=%d", typ, err, len(b)) + return fmt.Errorf("error when reading %s headers: %w. Buffer size=%d", typ, err, len(b)) } - return fmt.Errorf("error when reading %s headers: %s. Buffer size=%d, contents: %s", typ, err, len(b), bufferSnippet(b)) + return fmt.Errorf("error when reading %s headers: %w. Buffer size=%d, contents: %s", typ, err, len(b), bufferSnippet(b)) } // Read reads request header from r. @@ -1519,6 +1928,61 @@ func (h *RequestHeader) readLoop(r *bufio.Reader, waitForMore bool) error { } } +// ReadTrailer reads request trailer header from r. +// +// io.EOF is returned if r is closed before reading the first byte. +func (h *RequestHeader) ReadTrailer(r *bufio.Reader) error { + n := 1 + for { + err := h.tryReadTrailer(r, n) + if err == nil { + return nil + } + if err != errNeedMore { + return err + } + n = r.Buffered() + 1 + } +} + +func (h *RequestHeader) tryReadTrailer(r *bufio.Reader, n int) error { + b, err := r.Peek(n) + if len(b) == 0 { + // Return ErrTimeout on any timeout. + if x, ok := err.(interface{ Timeout() bool }); ok && x.Timeout() { + return ErrTimeout + } + + if n == 1 || err == io.EOF { + return io.EOF + } + + // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 . + if err == bufio.ErrBufferFull { + if h.secureErrorLogMessage { + return &ErrSmallBuffer{ + error: fmt.Errorf("error when reading request trailer"), + } + } + return &ErrSmallBuffer{ + error: fmt.Errorf("error when reading request trailer: %w", errSmallBuffer), + } + } + + return fmt.Errorf("error when reading request trailer: %w", err) + } + b = mustPeekBuffered(r) + headersLen, errParse := h.parseTrailer(b) + if errParse != nil { + if err == io.EOF { + return err + } + return headerError("request", err, errParse, b, h.secureErrorLogMessage) + } + mustDiscard(r, headersLen) + return nil +} + func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error { h.resetSkipNormalize() b, err := r.Peek(n) @@ -1534,7 +1998,7 @@ func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error { // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 . 
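Throughout this diff, fmt.Errorf verbs switch from %s to %w, so wrapped errors stay inspectable with errors.Is/errors.As. A standalone illustration; the sentinel here is local to the example, not fasthttp's unexported one:

```go
package main

import (
	"errors"
	"fmt"
)

var errSmallBuffer = errors.New("small read buffer")

func main() {
	// With %w the sentinel is wrapped, not just formatted into the string.
	err := fmt.Errorf("error when reading request trailer: %w", errSmallBuffer)

	fmt.Println(errors.Is(err, errSmallBuffer)) // true
	// With the old %s verb this check would have printed false.
}
```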
if err == bufio.ErrBufferFull { return &ErrSmallBuffer{ - error: fmt.Errorf("error when reading request headers: %s", errSmallBuffer), + error: fmt.Errorf("error when reading request headers: %w (n=%d, r.Buffered()=%d)", errSmallBuffer, n, r.Buffered()), } } @@ -1544,7 +2008,7 @@ func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error { return ErrNothingRead{err} } - return fmt.Errorf("error when reading request headers: %s", err) + return fmt.Errorf("error when reading request headers: %w", err) } b = mustPeekBuffered(r) headersLen, errParse := h.parse(b) @@ -1615,25 +2079,58 @@ func (h *ResponseHeader) WriteTo(w io.Writer) (int64, error) { // Header returns response header representation. // -// The returned value is valid until the next call to ResponseHeader methods. +// Headers that set as Trailer will not represent. Use TrailerHeader for trailers. +// +// The returned value is valid until the request is released, +// either though ReleaseRequest or your request handler returning. +// Do not store references to returned value. Make copies instead. func (h *ResponseHeader) Header() []byte { h.bufKV.value = h.AppendBytes(h.bufKV.value[:0]) return h.bufKV.value } +// writeTrailer writes response trailer to w. +func (h *ResponseHeader) writeTrailer(w *bufio.Writer) error { + _, err := w.Write(h.TrailerHeader()) + return err +} + +// TrailerHeader returns response trailer header representation. +// +// Trailers will only be received with chunked transfer. +// +// The returned value is valid until the request is released, +// either though ReleaseRequest or your request handler returning. +// Do not store references to returned value. Make copies instead. +func (h *ResponseHeader) TrailerHeader() []byte { + h.bufKV.value = h.bufKV.value[:0] + for _, t := range h.trailer { + value := h.peek(t.key) + h.bufKV.value = appendHeaderLine(h.bufKV.value, t.key, value) + } + h.bufKV.value = append(h.bufKV.value, strCRLF...) + return h.bufKV.value +} + // String returns response header representation. func (h *ResponseHeader) String() string { return string(h.Header()) } -// AppendBytes appends response header representation to dst and returns +// appendStatusLine appends the response status line to dst and returns // the extended dst. -func (h *ResponseHeader) AppendBytes(dst []byte) []byte { +func (h *ResponseHeader) appendStatusLine(dst []byte) []byte { statusCode := h.StatusCode() if statusCode < 0 { statusCode = StatusOK } - dst = append(dst, statusLine(statusCode)...) + return formatStatusLine(dst, h.Protocol(), statusCode, h.StatusMessage()) +} + +// AppendBytes appends response header representation to dst and returns +// the extended dst. 
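ResponseHeader now carries a protocol and a status message, and the status line is assembled from them via appendStatusLine/formatStatusLine as shown above. A sketch of overriding the reason phrase in a handler; the status code, message, and listen address are illustrative:

```go
package main

import (
	"log"

	"github.com/valyala/fasthttp"
)

func handler(ctx *fasthttp.RequestCtx) {
	ctx.SetStatusCode(fasthttp.StatusTeapot)

	// The status line is built from Protocol(), StatusCode() and
	// StatusMessage(); override the reason phrase for this response.
	ctx.Response.Header.SetStatusMessage([]byte("Short And Stout"))

	ctx.SetBodyString("I'm a teapot")
}

func main() {
	log.Fatal(fasthttp.ListenAndServe(":8080", handler))
}
```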
+func (h *ResponseHeader) AppendBytes(dst []byte) []byte { + dst = h.appendStatusLine(dst[:0]) server := h.Server() if len(server) != 0 { @@ -1661,11 +2158,24 @@ func (h *ResponseHeader) AppendBytes(dst []byte) []byte { for i, n := 0, len(h.h); i < n; i++ { kv := &h.h[i] - if h.noDefaultDate || !bytes.Equal(kv.key, strDate) { + + // Exclude trailer from header + exclude := false + for _, t := range h.trailer { + if bytes.Equal(kv.key, t.key) { + exclude = true + break + } + } + if !exclude && (h.noDefaultDate || !bytes.Equal(kv.key, strDate)) { dst = appendHeaderLine(dst, kv.key, kv.value) } } + if len(h.trailer) > 0 { + dst = appendHeaderLine(dst, strTrailer, appendArgsKeyBytes(nil, h.trailer, strCommaSpace)) + } + n := len(h.cookies) if n > 0 { for i := 0; i < n; i++ { @@ -1697,12 +2207,39 @@ func (h *RequestHeader) WriteTo(w io.Writer) (int64, error) { // Header returns request header representation. // -// The returned representation is valid until the next call to RequestHeader methods. +// Headers that set as Trailer will not represent. Use TrailerHeader for trailers. +// +// The returned value is valid until the request is released, +// either though ReleaseRequest or your request handler returning. +// Do not store references to returned value. Make copies instead. func (h *RequestHeader) Header() []byte { h.bufKV.value = h.AppendBytes(h.bufKV.value[:0]) return h.bufKV.value } +// writeTrailer writes request trailer to w. +func (h *RequestHeader) writeTrailer(w *bufio.Writer) error { + _, err := w.Write(h.TrailerHeader()) + return err +} + +// TrailerHeader returns request trailer header representation. +// +// Trailers will only be received with chunked transfer. +// +// The returned value is valid until the request is released, +// either though ReleaseRequest or your request handler returning. +// Do not store references to returned value. Make copies instead. +func (h *RequestHeader) TrailerHeader() []byte { + h.bufKV.value = h.bufKV.value[:0] + for _, t := range h.trailer { + value := h.peek(t.key) + h.bufKV.value = appendHeaderLine(h.bufKV.value, t.key, value) + } + h.bufKV.value = append(h.bufKV.value, strCRLF...) + return h.bufKV.value +} + // RawHeaders returns raw header key/value bytes. // // Depending on server configuration, header keys may be normalized to @@ -1743,7 +2280,7 @@ func (h *RequestHeader) AppendBytes(dst []byte) []byte { } contentType := h.ContentType() - if len(contentType) == 0 && !h.ignoreBody() { + if !h.noDefaultContentType && len(contentType) == 0 && !h.ignoreBody() { contentType = strDefaultContentType } if len(contentType) > 0 { @@ -1755,7 +2292,21 @@ func (h *RequestHeader) AppendBytes(dst []byte) []byte { for i, n := 0, len(h.h); i < n; i++ { kv := &h.h[i] - dst = appendHeaderLine(dst, kv.key, kv.value) + // Exclude trailer from header + exclude := false + for _, t := range h.trailer { + if bytes.Equal(kv.key, t.key) { + exclude = true + break + } + } + if !exclude { + dst = appendHeaderLine(dst, kv.key, kv.value) + } + } + + if len(h.trailer) > 0 { + dst = appendHeaderLine(dst, strTrailer, appendArgsKeyBytes(nil, h.trailer, strCommaSpace)) } // there is no need in h.collectCookies() here, since if cookies aren't collected yet, @@ -1794,6 +2345,43 @@ func (h *ResponseHeader) parse(buf []byte) (int, error) { return m + n, nil } +func (h *ResponseHeader) parseTrailer(buf []byte) (int, error) { + // Skip any 0 length chunk. 
+ if buf[0] == '0' { + skip := len(strCRLF) + 1 + if len(buf) < skip { + return 0, io.EOF + } + buf = buf[skip:] + } + + var s headerScanner + s.b = buf + s.disableNormalizing = h.disableNormalizing + var err error + for s.next() { + if len(s.key) > 0 { + if bytes.IndexByte(s.key, ' ') != -1 || bytes.IndexByte(s.key, '\t') != -1 { + err = fmt.Errorf("invalid trailer key %q", s.key) + continue + } + // Forbidden by RFC 7230, section 4.1.2 + if isBadTrailer(s.key) { + err = fmt.Errorf("forbidden trailer key %q", s.key) + continue + } + } + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) + } + if s.err != nil { + return 0, s.err + } + if err != nil { + return 0, err + } + return s.hLen, nil +} + func (h *RequestHeader) ignoreBody() bool { return h.IsGet() || h.IsHead() } @@ -1816,6 +2404,87 @@ func (h *RequestHeader) parse(buf []byte) (int, error) { return m + n, nil } +func (h *RequestHeader) parseTrailer(buf []byte) (int, error) { + // Skip any 0 length chunk. + if buf[0] == '0' { + skip := len(strCRLF) + 1 + if len(buf) < skip { + return 0, io.EOF + } + buf = buf[skip:] + } + + var s headerScanner + s.b = buf + s.disableNormalizing = h.disableNormalizing + var err error + for s.next() { + if len(s.key) > 0 { + if bytes.IndexByte(s.key, ' ') != -1 || bytes.IndexByte(s.key, '\t') != -1 { + err = fmt.Errorf("invalid trailer key %q", s.key) + continue + } + // Forbidden by RFC 7230, section 4.1.2 + if isBadTrailer(s.key) { + err = fmt.Errorf("forbidden trailer key %q", s.key) + continue + } + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) + } + } + if s.err != nil { + return 0, s.err + } + if err != nil { + return 0, err + } + return s.hLen, nil +} + +func isBadTrailer(key []byte) bool { + if len(key) == 0 { + return true + } + + switch key[0] | 0x20 { + case 'a': + return caseInsensitiveCompare(key, strAuthorization) + case 'c': + if len(key) > len(HeaderContentType) && caseInsensitiveCompare(key[:8], strContentType[:8]) { + // skip compare prefix 'Content-' + return caseInsensitiveCompare(key[8:], strContentEncoding[8:]) || + caseInsensitiveCompare(key[8:], strContentLength[8:]) || + caseInsensitiveCompare(key[8:], strContentType[8:]) || + caseInsensitiveCompare(key[8:], strContentRange[8:]) + } + return caseInsensitiveCompare(key, strConnection) + case 'e': + return caseInsensitiveCompare(key, strExpect) + case 'h': + return caseInsensitiveCompare(key, strHost) + case 'k': + return caseInsensitiveCompare(key, strKeepAlive) + case 'm': + return caseInsensitiveCompare(key, strMaxForwards) + case 'p': + if len(key) > len(HeaderProxyConnection) && caseInsensitiveCompare(key[:6], strProxyConnection[:6]) { + // skip compare prefix 'Proxy-' + return caseInsensitiveCompare(key[6:], strProxyConnection[6:]) || + caseInsensitiveCompare(key[6:], strProxyAuthenticate[6:]) || + caseInsensitiveCompare(key[6:], strProxyAuthorization[6:]) + } + case 'r': + return caseInsensitiveCompare(key, strRange) + case 't': + return caseInsensitiveCompare(key, strTE) || + caseInsensitiveCompare(key, strTrailer) || + caseInsensitiveCompare(key, strTransferEncoding) + case 'w': + return caseInsensitiveCompare(key, strWWWAuthenticate) + } + return false +} + func (h *ResponseHeader) parseFirstLine(buf []byte) (int, error) { bNext := buf var b []byte @@ -1841,9 +2510,9 @@ func (h *ResponseHeader) parseFirstLine(buf []byte) (int, error) { h.statusCode, n, err = parseUintBuf(b) if err != nil { if h.secureErrorLogMessage { - return 0, fmt.Errorf("cannot parse response status code: %s", err) + return 0, 
fmt.Errorf("cannot parse response status code: %w", err) } - return 0, fmt.Errorf("cannot parse response status code: %s. Response %q", err, buf) + return 0, fmt.Errorf("cannot parse response status code: %w. Response %q", err, buf) } if len(b) > n && b[n] != ' ' { if h.secureErrorLogMessage { @@ -1851,6 +2520,9 @@ func (h *ResponseHeader) parseFirstLine(buf []byte) (int, error) { } return 0, fmt.Errorf("unexpected char at the end of status code. Response %q", buf) } + if len(b) > n+1 { + h.SetStatusMessage(b[n+1:]) + } return len(buf) - len(bNext), nil } @@ -1982,6 +2654,10 @@ func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) { } continue } + if caseInsensitiveCompare(s.key, strTrailer) { + err = h.SetTrailerBytes(s.value) + continue + } } h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) } @@ -2004,7 +2680,7 @@ func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) { h.connectionClose = !hasHeaderValue(v, strKeepAlive) } - return len(buf) - len(s.b), nil + return len(buf) - len(s.b), err } func (h *RequestHeader) parseHeaders(buf []byte) (int, error) { @@ -2070,6 +2746,14 @@ func (h *RequestHeader) parseHeaders(buf []byte) (int, error) { } continue } + if caseInsensitiveCompare(s.key, strTrailer) { + if nerr := h.SetTrailerBytes(s.value); nerr != nil { + if err == nil { + err = nerr + } + } + continue + } } } h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) @@ -2113,13 +2797,15 @@ func (h *RequestHeader) collectCookies() { h.cookiesCollected = true } +var errNonNumericChars = errors.New("non-numeric chars found") + func parseContentLength(b []byte) (int, error) { v, n, err := parseUintBuf(b) if err != nil { - return -1, err + return -1, fmt.Errorf("cannot parse Content-Length: %w", err) } if n != len(b) { - return -1, fmt.Errorf("non-numeric chars at the end of Content-Length") + return -1, fmt.Errorf("cannot parse Content-Length: %w", errNonNumericChars) } return v, nil } @@ -2453,6 +3139,17 @@ func AppendNormalizedHeaderKeyBytes(dst, key []byte) []byte { return AppendNormalizedHeaderKey(dst, b2s(key)) } +func appendArgsKeyBytes(dst []byte, args []argsKV, sep []byte) []byte { + for i, n := 0, len(args); i < n; i++ { + kv := &args[i] + dst = append(dst, kv.key...) + if i+1 < n { + dst = append(dst, sep...) + } + } + return dst +} + var ( errNeedMore = errors.New("need more data: cannot find trailing lf") errInvalidName = errors.New("invalid header name") diff --git a/vendor/github.com/valyala/fasthttp/headers.go b/vendor/github.com/valyala/fasthttp/headers.go index 378dfec..676a0da 100644 --- a/vendor/github.com/valyala/fasthttp/headers.go +++ b/vendor/github.com/valyala/fasthttp/headers.go @@ -36,8 +36,9 @@ const ( HeaderVary = "Vary" // Connection management - HeaderConnection = "Connection" - HeaderKeepAlive = "Keep-Alive" + HeaderConnection = "Connection" + HeaderKeepAlive = "Keep-Alive" + HeaderProxyConnection = "Proxy-Connection" // Content negotiation HeaderAccept = "Accept" diff --git a/vendor/github.com/valyala/fasthttp/http.go b/vendor/github.com/valyala/fasthttp/http.go index 2511819..47431cd 100644 --- a/vendor/github.com/valyala/fasthttp/http.go +++ b/vendor/github.com/valyala/fasthttp/http.go @@ -17,6 +17,18 @@ import ( "github.com/valyala/bytebufferpool" ) +var ( + requestBodyPoolSizeLimit = -1 + responseBodyPoolSizeLimit = -1 +) + +// SetBodySizePoolLimit set the max body size for bodies to be returned to the pool. +// If the body size is larger it will be released instead of put back into the pool for reuse. 
+func SetBodySizePoolLimit(reqBodyLimit, respBodyLimit int) { + requestBodyPoolSizeLimit = reqBodyLimit + responseBodyPoolSizeLimit = respBodyLimit +} + // Request represents HTTP request. // // It is forbidden copying Request instances. Create new instances @@ -53,9 +65,12 @@ type Request struct { // Client/HostClient shouldn't use this field but should depend on the uri.scheme instead. isTLS bool - // Request timeout. Usually set by DoDealine or DoTimeout + // Request timeout. Usually set by DoDeadline or DoTimeout // if <= 0, means not set timeout time.Duration + + // Use Host header (request.Header.SetHost) instead of the host from SetRequestURI, SetHost, or URI().SetHost + UseHostHeader bool } // Response represents HTTP response. @@ -320,7 +335,9 @@ func (resp *Response) LocalAddr() net.Addr { // Body returns response body. // -// The returned body is valid until the response modification. +// The returned value is valid until the response is released, +// either though ReleaseResponse or your request handler returning. +// Do not store references to returned value. Make copies instead. func (resp *Response) Body() []byte { if resp.bodyStream != nil { bodyBuf := resp.bodyBuffer() @@ -638,7 +655,9 @@ func (req *Request) SwapBody(body []byte) []byte { // Body returns request body. // -// The returned body is valid until the request modification. +// The returned value is valid until the request is released, +// either though ReleaseRequest or your request handler returning. +// Do not store references to returned value. Make copies instead. func (req *Request) Body() []byte { if req.bodyRaw != nil { return req.bodyRaw @@ -725,6 +744,8 @@ func (req *Request) copyToSkipBody(dst *Request) { dst.parsedPostArgs = req.parsedPostArgs dst.isTLS = req.isTLS + dst.UseHostHeader = req.UseHostHeader + // do not copy multipartForm - it will be automatically // re-created on the first call to MultipartForm. } @@ -770,6 +791,20 @@ func (req *Request) URI() *URI { return &req.uri } +// SetURI initializes request URI +// Use this method if a single URI may be reused across multiple requests. +// Otherwise, you can just use SetRequestURI() and it will be parsed as new URI. +// The URI is copied and can be safely modified later. +func (req *Request) SetURI(newUri *URI) { + if newUri != nil { + newUri.CopyTo(&req.uri) + req.parsedURI = true + return + } + req.uri.Reset() + req.parsedURI = false +} + func (req *Request) parseURI() error { if req.parsedURI { return nil @@ -826,7 +861,7 @@ func (req *Request) MultipartForm() (*multipart.Form, error) { if bytes.Equal(ce, strGzip) { // Do not care about memory usage here. if bodyStream, err = gzip.NewReader(bodyStream); err != nil { - return nil, fmt.Errorf("cannot gunzip request body: %s", err) + return nil, fmt.Errorf("cannot gunzip request body: %w", err) } } else if len(ce) > 0 { return nil, fmt.Errorf("unsupported Content-Encoding: %q", ce) @@ -835,14 +870,14 @@ func (req *Request) MultipartForm() (*multipart.Form, error) { mr := multipart.NewReader(bodyStream, req.multipartFormBoundary) req.multipartForm, err = mr.ReadForm(8 * 1024) if err != nil { - return nil, fmt.Errorf("cannot read multipart/form-data body: %s", err) + return nil, fmt.Errorf("cannot read multipart/form-data body: %w", err) } } else { body := req.bodyBytes() if bytes.Equal(ce, strGzip) { // Do not care about memory usage here. 
if body, err = AppendGunzipBytes(nil, body); err != nil { - return nil, fmt.Errorf("cannot gunzip request body: %s", err) + return nil, fmt.Errorf("cannot gunzip request body: %w", err) } } else if len(ce) > 0 { return nil, fmt.Errorf("unsupported Content-Encoding: %q", ce) @@ -876,14 +911,14 @@ func WriteMultipartForm(w io.Writer, f *multipart.Form, boundary string) error { mw := multipart.NewWriter(w) if err := mw.SetBoundary(boundary); err != nil { - return fmt.Errorf("cannot use form boundary %q: %s", boundary, err) + return fmt.Errorf("cannot use form boundary %q: %w", boundary, err) } // marshal values for k, vv := range f.Value { for _, v := range vv { if err := mw.WriteField(k, v); err != nil { - return fmt.Errorf("cannot write form field %q value %q: %s", k, v, err) + return fmt.Errorf("cannot write form field %q value %q: %w", k, v, err) } } } @@ -893,23 +928,23 @@ func WriteMultipartForm(w io.Writer, f *multipart.Form, boundary string) error { for _, fv := range fvv { vw, err := mw.CreatePart(fv.Header) if err != nil { - return fmt.Errorf("cannot create form file %q (%q): %s", k, fv.Filename, err) + return fmt.Errorf("cannot create form file %q (%q): %w", k, fv.Filename, err) } fh, err := fv.Open() if err != nil { return fmt.Errorf("cannot open form file %q (%q): %s", k, fv.Filename, err) } if _, err = copyZeroAlloc(vw, fh); err != nil { - return fmt.Errorf("error when copying form file %q (%q): %s", k, fv.Filename, err) + return fmt.Errorf("error when copying form file %q (%q): %w", k, fv.Filename, err) } if err = fh.Close(); err != nil { - return fmt.Errorf("cannot close form file %q (%q): %s", k, fv.Filename, err) + return fmt.Errorf("cannot close form file %q (%q): %w", k, fv.Filename, err) } } } if err := mw.Close(); err != nil { - return fmt.Errorf("error when closing multipart form writer: %s", err) + return fmt.Errorf("error when closing multipart form writer: %w", err) } return nil @@ -927,16 +962,20 @@ func readMultipartForm(r io.Reader, boundary string, size, maxInMemoryFileSize i mr := multipart.NewReader(lr, boundary) f, err := mr.ReadForm(int64(maxInMemoryFileSize)) if err != nil { - return nil, fmt.Errorf("cannot read multipart/form-data body: %s", err) + return nil, fmt.Errorf("cannot read multipart/form-data body: %w", err) } return f, nil } // Reset clears request contents. func (req *Request) Reset() { + if requestBodyPoolSizeLimit >= 0 && req.body != nil { + req.ReleaseBody(requestBodyPoolSizeLimit) + } req.Header.Reset() req.resetSkipHeader() req.timeout = 0 + req.UseHostHeader = false } func (req *Request) resetSkipHeader() { @@ -962,6 +1001,9 @@ func (req *Request) RemoveMultipartFormFiles() { // Reset clears response contents. func (resp *Response) Reset() { + if responseBodyPoolSizeLimit >= 0 && resp.body != nil { + resp.ReleaseBody(responseBodyPoolSizeLimit) + } resp.Header.Reset() resp.resetSkipHeader() resp.SkipBody = false @@ -1118,18 +1160,46 @@ func (req *Request) ContinueReadBody(r *bufio.Reader, maxBodySize int, preParseM return nil } + if err = req.ReadBody(r, contentLength, maxBodySize); err != nil { + return err + } + + if req.Header.ContentLength() == -1 { + err = req.Header.ReadTrailer(r) + if err != nil && err != io.EOF { + return err + } + } + return nil +} + +// ReadBody reads request body from the given r, limiting the body size. +// +// If maxBodySize > 0 and the body size exceeds maxBodySize, +// then ErrBodyTooLarge is returned. 
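Together with the ReadBody helper defined below, ContinueReadBody above now reads any trailer after a chunked body via ReadTrailer, and the writing side (writeBodyStream plus writeTrailer) emits declared trailer fields after the final chunk. A rough client-side sketch using only identifiers visible in this change (SetTrailerBytes, SetBodyStream with size -1, package-level Do); the URL, trailer name and checksum value are made up, and setting the trailer value through Header.Set is inferred from the AppendBytes/writeTrailer logic above rather than stated anywhere:

package main

import (
	"log"
	"strings"

	"github.com/valyala/fasthttp"
)

func main() {
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)

	req.SetRequestURI("http://example.com/upload") // hypothetical endpoint
	req.Header.SetMethod(fasthttp.MethodPost)

	// Declare the trailer key; AppendBytes then omits it from the header
	// block and writeTrailer emits it after the last chunk.
	if err := req.Header.SetTrailerBytes([]byte("X-Checksum")); err != nil {
		log.Fatal(err)
	}
	req.SetBodyStream(strings.NewReader("payload"), -1) // -1 => chunked body
	req.Header.Set("X-Checksum", "abc123")              // written in the trailer section

	if err := fasthttp.Do(req, resp); err != nil {
		log.Fatal(err)
	}
}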
+func (req *Request) ReadBody(r *bufio.Reader, contentLength int, maxBodySize int) (err error) { bodyBuf := req.bodyBuffer() bodyBuf.Reset() - bodyBuf.B, err = readBody(r, contentLength, maxBodySize, bodyBuf.B) + + if contentLength >= 0 { + bodyBuf.B, err = readBody(r, contentLength, maxBodySize, bodyBuf.B) + + } else if contentLength == -1 { + bodyBuf.B, err = readBodyChunked(r, maxBodySize, bodyBuf.B) + + } else { + bodyBuf.B, err = readBodyIdentity(r, maxBodySize, bodyBuf.B) + req.Header.SetContentLength(len(bodyBuf.B)) + } + if err != nil { req.Reset() return err } - req.Header.SetContentLength(len(bodyBuf.B)) return nil } -// ContinueReadBody reads request body if request header contains +// ContinueReadBodyStream reads request body if request header contains // 'Expect: 100-continue'. // // The caller must send StatusContinue response before calling this method. @@ -1171,12 +1241,12 @@ func (req *Request) ContinueReadBodyStream(r *bufio.Reader, maxBodySize int, pre if err == ErrBodyTooLarge { req.Header.SetContentLength(contentLength) req.body = bodyBuf - req.bodyStream = acquireRequestStream(bodyBuf, r, contentLength) + req.bodyStream = acquireRequestStream(bodyBuf, r, &req.Header) return nil } if err == errChunkedStream { req.body = bodyBuf - req.bodyStream = acquireRequestStream(bodyBuf, r, -1) + req.bodyStream = acquireRequestStream(bodyBuf, r, &req.Header) return nil } req.Reset() @@ -1184,7 +1254,7 @@ func (req *Request) ContinueReadBodyStream(r *bufio.Reader, maxBodySize int, pre } req.body = bodyBuf - req.bodyStream = acquireRequestStream(bodyBuf, r, contentLength) + req.bodyStream = acquireRequestStream(bodyBuf, r, &req.Header) req.Header.SetContentLength(contentLength) return nil } @@ -1196,7 +1266,10 @@ func (resp *Response) Read(r *bufio.Reader) error { return resp.ReadLimitBody(r, 0) } -// ReadLimitBody reads response from the given r, limiting the body size. +// ReadLimitBody reads response headers from the given r, +// then reads the body using the ReadBody function and limiting the body size. +// +// If resp.SkipBody is true then it skips reading the response body. // // If maxBodySize > 0 and the body size exceeds maxBodySize, // then ErrBodyTooLarge is returned. @@ -1216,17 +1289,43 @@ func (resp *Response) ReadLimitBody(r *bufio.Reader, maxBodySize int) error { } if !resp.mustSkipBody() { - bodyBuf := resp.bodyBuffer() - bodyBuf.Reset() - bodyBuf.B, err = readBody(r, resp.Header.ContentLength(), maxBodySize, bodyBuf.B) + err = resp.ReadBody(r, maxBodySize) if err != nil { return err } - resp.Header.SetContentLength(len(bodyBuf.B)) + } + + if resp.Header.ContentLength() == -1 { + err = resp.Header.ReadTrailer(r) + if err != nil && err != io.EOF { + return err + } } return nil } +// ReadBody reads response body from the given r, limiting the body size. +// +// If maxBodySize > 0 and the body size exceeds maxBodySize, +// then ErrBodyTooLarge is returned. 
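ReadLimitBody above now delegates to the Response.ReadBody helper defined below, which picks fixed-length, chunked, or identity reading based on Content-Length and, for chunked responses, pulls in the trailer afterwards; ErrBodyTooLarge still signals that the cap was exceeded. A small sketch of reading a capped response straight off a connection; the dial target and the hand-written request line are placeholders:

package main

import (
	"bufio"
	"log"
	"net"

	"github.com/valyala/fasthttp"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.Write([]byte("GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")); err != nil {
		log.Fatal(err)
	}

	var resp fasthttp.Response
	br := bufio.NewReader(conn)
	// Headers first, then the body via the ReadBody path shown above;
	// a chunked response also has its trailer read here.
	if err := resp.ReadLimitBody(br, 1<<20); err != nil {
		if err == fasthttp.ErrBodyTooLarge {
			log.Println("body larger than 1 MiB, giving up")
			return
		}
		log.Fatal(err)
	}
	log.Printf("status %d, %d body bytes", resp.StatusCode(), len(resp.Body()))
}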
+func (resp *Response) ReadBody(r *bufio.Reader, maxBodySize int) (err error) { + bodyBuf := resp.bodyBuffer() + bodyBuf.Reset() + + contentLength := resp.Header.ContentLength() + if contentLength >= 0 { + bodyBuf.B, err = readBody(r, contentLength, maxBodySize, bodyBuf.B) + + } else if contentLength == -1 { + bodyBuf.B, err = readBodyChunked(r, maxBodySize, bodyBuf.B) + + } else { + bodyBuf.B, err = readBodyIdentity(r, maxBodySize, bodyBuf.B) + resp.Header.SetContentLength(len(bodyBuf.B)) + } + return err +} + func (resp *Response) mustSkipBody() bool { return resp.SkipBody || resp.Header.mustSkipContentLength() } @@ -1319,10 +1418,15 @@ func (req *Request) Write(w *bufio.Writer) error { if len(req.Header.Host()) == 0 || req.parsedURI { uri := req.URI() host := uri.Host() - if len(host) == 0 { - return errRequestHostRequired + if len(req.Header.Host()) == 0 { + if len(host) == 0 { + return errRequestHostRequired + } else { + req.Header.SetHostBytes(host) + } + } else if !req.UseHostHeader { + req.Header.SetHostBytes(host) } - req.Header.SetHostBytes(host) req.Header.SetRequestURIBytes(uri.RequestURI()) if len(uri.username) > 0 { @@ -1354,7 +1458,7 @@ func (req *Request) Write(w *bufio.Writer) error { if req.onlyMultipartForm() { body, err = marshalMultipartForm(req.multipartForm, req.multipartFormBoundary) if err != nil { - return fmt.Errorf("error when marshaling multipart form: %s", err) + return fmt.Errorf("error when marshaling multipart form: %w", err) } req.Header.SetMultipartFormBoundary(req.multipartFormBoundary) } @@ -1685,9 +1789,13 @@ func (req *Request) writeBodyStream(w *bufio.Writer) error { } } else { req.Header.SetContentLength(-1) - if err = req.Header.Write(w); err == nil { + err = req.Header.Write(w) + if err == nil { err = writeBodyChunked(w, req.bodyStream) } + if err == nil { + err = req.Header.writeTrailer(w) + } } err1 := req.closeBodyStream() if err == nil { @@ -1741,6 +1849,9 @@ func (resp *Response) writeBodyStream(w *bufio.Writer, sendBody bool) (err error if err == nil && sendBody { err = writeBodyChunked(w, resp.bodyStream) } + if err == nil { + err = resp.Header.writeTrailer(w) + } } } err1 := resp.closeBodyStream() @@ -1889,12 +2000,13 @@ func writeChunk(w *bufio.Writer, b []byte) error { if _, err := w.Write(b); err != nil { return err } - _, err := w.Write(strCRLF) - err1 := w.Flush() - if err == nil { - err = err1 + // If is end chunk, write CRLF after writing trailer + if n > 0 { + if _, err := w.Write(strCRLF); err != nil { + return err + } } - return err + return w.Flush() } // ErrBodyTooLarge is returned if either request or response body exceeds @@ -1902,17 +2014,10 @@ func writeChunk(w *bufio.Writer, b []byte) error { var ErrBodyTooLarge = errors.New("body size exceeds the given limit") func readBody(r *bufio.Reader, contentLength int, maxBodySize int, dst []byte) ([]byte, error) { - dst = dst[:0] - if contentLength >= 0 { - if maxBodySize > 0 && contentLength > maxBodySize { - return dst, ErrBodyTooLarge - } - return appendBodyFixedSize(r, dst, contentLength) + if maxBodySize > 0 && contentLength > maxBodySize { + return dst, ErrBodyTooLarge } - if contentLength == -1 { - return readBodyChunked(r, maxBodySize, dst) - } - return readBodyIdentity(r, maxBodySize, dst) + return appendBodyFixedSize(r, dst, contentLength) } var errChunkedStream = errors.New("chunked stream") @@ -2029,6 +2134,9 @@ func readBodyChunked(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, erro if err != nil { return dst, err } + if chunkSize == 0 { + return dst, err + } 
if maxBodySize > 0 && len(dst)+chunkSize > maxBodySize { return dst, ErrBodyTooLarge } @@ -2042,9 +2150,6 @@ func readBodyChunked(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, erro } } dst = dst[:len(dst)-strCRLFLen] - if chunkSize == 0 { - return dst, nil - } } } @@ -2060,13 +2165,14 @@ func parseChunkSize(r *bufio.Reader) (int, error) { error: fmt.Errorf("cannot read '\r' char at the end of chunk size: %s", err), } } - // Skip any trailing whitespace after chunk size. - if c == ' ' { + // Skip chunk extension after chunk size. + // Add support later if anyone needs it. + if c != '\r' { continue } if err := r.UnreadByte(); err != nil { return -1, ErrBrokenChunk{ - error: fmt.Errorf("cannot unread '\r' char at the end of chunk size: %s", err), + error: fmt.Errorf("cannot unread '\r' char at the end of chunk size: %w", err), } } break @@ -2083,7 +2189,7 @@ func readCrLf(r *bufio.Reader) error { c, err := r.ReadByte() if err != nil { return ErrBrokenChunk{ - error: fmt.Errorf("cannot read %q char at the end of chunk size: %s", exp, err), + error: fmt.Errorf("cannot read %q char at the end of chunk size: %w", exp, err), } } if c != exp { diff --git a/vendor/github.com/valyala/fasthttp/server.go b/vendor/github.com/valyala/fasthttp/server.go index ab678ea..3b58542 100644 --- a/vendor/github.com/valyala/fasthttp/server.go +++ b/vendor/github.com/valyala/fasthttp/server.go @@ -248,6 +248,10 @@ type Server struct { // Deprecated: Use IdleTimeout instead. MaxKeepaliveDuration time.Duration + // MaxIdleWorkerDuration is the maximum idle time of a single worker in the underlying + // worker pool of the Server. Idle workers beyond this time will be cleared. + MaxIdleWorkerDuration time.Duration + // Period between tcp keep-alive messages. // // TCP keep-alive period is determined by operation system by default. @@ -340,7 +344,7 @@ type Server struct { // SleepWhenConcurrencyLimitsExceeded is a duration to be slept of if // the concurrency limit in exceeded (default [when is 0]: don't sleep - // and accept new connections immidiatelly). + // and accept new connections immediately). SleepWhenConcurrencyLimitsExceeded time.Duration // NoDefaultServerHeader, when set to true, causes the default Server header @@ -391,7 +395,17 @@ type Server struct { // By default standard logger from log package is used. Logger Logger - tlsConfig *tls.Config + // TLSConfig optionally provides a TLS configuration for use + // by ServeTLS, ServeTLSEmbed, ListenAndServeTLS, ListenAndServeTLSEmbed, + // AppendCert, AppendCertEmbed and NextProto. + // + // Note that this value is cloned by ServeTLS, ServeTLSEmbed, ListenAndServeTLS + // and ListenAndServeTLSEmbed, so it's not possible to modify the configuration + // with methods like tls.Config.SetSessionTicketKeys. + // To use SetSessionTicketKeys, use Server.Serve with a TLS Listener + // instead. + TLSConfig *tls.Config + nextProtos map[string]ServeHandler concurrency uint32 @@ -404,9 +418,12 @@ type Server struct { writerPool sync.Pool hijackConnPool sync.Pool - // We need to know our listeners so we can close them in Shutdown(). + // We need to know our listeners and idle connections so we can close them in Shutdown(). ln []net.Listener + idleConns map[net.Conn]struct{} + idleConnsMu sync.Mutex + mu sync.Mutex open int32 stop int32 @@ -698,6 +715,16 @@ func (ctx *RequestCtx) ResetUserValues() { ctx.userValues.Reset() } +// RemoveUserValue removes the given key and the value under it in ctx. 
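RemoveUserValue and RemoveUserValueBytes, defined just below and backed by userData.Remove later in this diff, delete a single key from the per-request store instead of resetting all of it; the companion userdata.go change also skips appending brand-new keys whose value is nil. A short handler sketch; the key, value and listen address are illustrative:

package main

import "github.com/valyala/fasthttp"

func handler(ctx *fasthttp.RequestCtx) {
	ctx.SetUserValue("requestID", "req-42") // illustrative key/value

	// ... routing / middleware would run here ...

	// Drop just this key; other user values survive until the handler returns.
	ctx.RemoveUserValue("requestID")
	if ctx.UserValue("requestID") == nil {
		ctx.SetBodyString("requestID cleared")
	}
}

func main() {
	_ = fasthttp.ListenAndServe(":8080", handler)
}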
+func (ctx *RequestCtx) RemoveUserValue(key string) { + ctx.userValues.Remove(key) +} + +// RemoveUserValueBytes removes the given key and the value under it in ctx. +func (ctx *RequestCtx) RemoveUserValueBytes(key []byte) { + ctx.userValues.RemoveBytes(key) +} + type connTLSer interface { Handshake() error ConnectionState() tls.ConnectionState @@ -750,12 +777,49 @@ func (ctx *RequestCtx) Conn() net.Conn { return ctx.c } +func (ctx *RequestCtx) reset() { + ctx.userValues.Reset() + ctx.Request.Reset() + ctx.Response.Reset() + ctx.fbr.reset() + + ctx.connID = 0 + ctx.connRequestNum = 0 + ctx.connTime = zeroTime + ctx.remoteAddr = nil + ctx.time = zeroTime + ctx.c = nil + + // Don't reset ctx.s! + // We have a pool per server so the next time this ctx is used it + // will be assigned the same value again. + // ctx might still be in use for context.Done() and context.Err() + // which are safe to use as they only use ctx.s and no other value. + + if ctx.timeoutResponse != nil { + ctx.timeoutResponse.Reset() + } + + if ctx.timeoutTimer != nil { + stopTimer(ctx.timeoutTimer) + } + + ctx.hijackHandler = nil + ctx.hijackNoResponse = false +} + type firstByteReader struct { c net.Conn ch byte byteRead bool } +func (r *firstByteReader) reset() { + r.c = nil + r.ch = 0 + r.byteRead = false +} + func (r *firstByteReader) Read(b []byte) (int, error) { if len(b) == 0 { return 0, nil @@ -859,40 +923,42 @@ func (ctx *RequestCtx) SetContentTypeBytes(contentType []byte) { // RequestURI returns RequestURI. // -// This uri is valid until returning from RequestHandler. +// The returned bytes are valid until your request handler returns. func (ctx *RequestCtx) RequestURI() []byte { return ctx.Request.Header.RequestURI() } // URI returns requested uri. // -// The uri is valid until returning from RequestHandler. +// This uri is valid until your request handler returns. func (ctx *RequestCtx) URI() *URI { return ctx.Request.URI() } // Referer returns request referer. // -// The referer is valid until returning from RequestHandler. +// The returned bytes are valid until your request handler returns. func (ctx *RequestCtx) Referer() []byte { return ctx.Request.Header.Referer() } // UserAgent returns User-Agent header value from the request. +// +// The returned bytes are valid until your request handler returns. func (ctx *RequestCtx) UserAgent() []byte { return ctx.Request.Header.UserAgent() } // Path returns requested path. // -// The path is valid until returning from RequestHandler. +// The returned bytes are valid until your request handler returns. func (ctx *RequestCtx) Path() []byte { return ctx.URI().Path() } // Host returns requested host. // -// The host is valid until returning from RequestHandler. +// The returned bytes are valid until your request handler returns. func (ctx *RequestCtx) Host() []byte { return ctx.URI().Host() } @@ -901,9 +967,9 @@ func (ctx *RequestCtx) Host() []byte { // // It doesn't return POST'ed arguments - use PostArgs() for this. // -// Returned arguments are valid until returning from RequestHandler. -// // See also PostArgs, FormValue and FormFile. +// +// These args are valid until your request handler returns. func (ctx *RequestCtx) QueryArgs() *Args { return ctx.URI().QueryArgs() } @@ -912,9 +978,9 @@ func (ctx *RequestCtx) QueryArgs() *Args { // // It doesn't return query arguments from RequestURI - use QueryArgs for this. // -// Returned arguments are valid until returning from RequestHandler. -// // See also QueryArgs, FormValue and FormFile. 
+// +// These args are valid until your request handler returns. func (ctx *RequestCtx) PostArgs() *Args { return ctx.Request.PostArgs() } @@ -930,7 +996,7 @@ func (ctx *RequestCtx) PostArgs() *Args { // // Use SaveMultipartFile function for permanently saving uploaded file. // -// The returned form is valid until returning from RequestHandler. +// The returned form is valid until your request handler returns. // // See also FormFile and FormValue. func (ctx *RequestCtx) MultipartForm() (*multipart.Form, error) { @@ -944,7 +1010,7 @@ func (ctx *RequestCtx) MultipartForm() (*multipart.Form, error) { // // Use SaveMultipartFile function for permanently saving uploaded file. // -// The returned file header is valid until returning from RequestHandler. +// The returned file header is valid until your request handler returns. func (ctx *RequestCtx) FormFile(key string) (*multipart.FileHeader, error) { mf, err := ctx.MultipartForm() if err != nil { @@ -1028,7 +1094,7 @@ func SaveMultipartFile(fh *multipart.FileHeader, path string) (err error) { // * MultipartForm for obtaining values from multipart form. // * FormFile for obtaining uploaded files. // -// The returned value is valid until returning from RequestHandler. +// The returned value is valid until your request handler returns. func (ctx *RequestCtx) FormValue(key string) []byte { v := ctx.QueryArgs().Peek(key) if len(v) > 0 { @@ -1090,7 +1156,7 @@ func (ctx *RequestCtx) IsPatch() bool { // Method return request method. // -// Returned value is valid until returning from RequestHandler. +// Returned value is valid until your request handler returns. func (ctx *RequestCtx) Method() []byte { return ctx.Request.Header.Method() } @@ -1277,6 +1343,10 @@ func (ctx *RequestCtx) ResetBody() { // SendFile logs all the errors via ctx.Logger. // // See also ServeFile, FSHandler and FS. +// +// WARNING: do not pass any user supplied paths to this function! +// WARNING: if path is based on user input users will be able to request +// any file on your filesystem! Use fasthttp.FS with a sane Root instead. func (ctx *RequestCtx) SendFile(path string) { ServeFile(ctx, path) } @@ -1288,6 +1358,10 @@ func (ctx *RequestCtx) SendFile(path string) { // SendFileBytes logs all the errors via ctx.Logger. // // See also ServeFileBytes, FSHandler and FS. +// +// WARNING: do not pass any user supplied paths to this function! +// WARNING: if path is based on user input users will be able to request +// any file on your filesystem! Use fasthttp.FS with a sane Root instead. func (ctx *RequestCtx) SendFileBytes(path []byte) { ServeFileBytes(ctx, path) } @@ -1336,7 +1410,7 @@ func (ctx *RequestCtx) WriteString(s string) (int, error) { // PostBody returns POST request body. // -// The returned value is valid until RequestHandler return. +// The returned bytes are valid until your request handler returns. func (ctx *RequestCtx) PostBody() []byte { return ctx.Request.Body() } @@ -1386,7 +1460,7 @@ func (ctx *RequestCtx) IsBodyStream() bool { // It is safe re-using returned logger for logging multiple messages // for the current request. // -// The returned logger is valid until returning from RequestHandler. +// The returned logger is valid until your request handler returns. 
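The reworded lifetime comments throughout this file all make the same point: byte slices and objects returned from RequestCtx (Path, Host, PostBody, the Logger and so on) are only valid until the handler returns, because the ctx and its buffers go back into a pool. A sketch of the copy-before-escaping pattern those comments ask for; process is a stand-in for any background work and the listen address is illustrative:

package main

import (
	"log"

	"github.com/valyala/fasthttp"
)

func handler(ctx *fasthttp.RequestCtx) {
	// Copy before handing the data to a goroutine: ctx's buffers are
	// recycled as soon as this handler returns.
	body := append([]byte(nil), ctx.PostBody()...)
	path := string(ctx.Path()) // string conversion also copies

	go process(path, body)

	ctx.SetStatusCode(fasthttp.StatusAccepted)
}

func process(path string, body []byte) {
	log.Printf("handling %s (%d bytes) in the background", path, len(body))
}

func main() {
	_ = fasthttp.ListenAndServe(":8080", handler)
}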
func (ctx *RequestCtx) Logger() Logger { if ctx.logger.ctx == nil { ctx.logger.ctx = ctx @@ -1452,8 +1526,9 @@ func (s *Server) NextProto(key string, nph ServeHandler) { if s.nextProtos == nil { s.nextProtos = make(map[string]ServeHandler) } + s.configTLS() - s.tlsConfig.NextProtos = append(s.tlsConfig.NextProtos, key) + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, key) s.nextProtos[key] = nph } @@ -1535,14 +1610,14 @@ func (s *Server) ListenAndServe(addr string) error { // The server sets the given file mode for the UNIX addr. func (s *Server) ListenAndServeUNIX(addr string, mode os.FileMode) error { if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("unexpected error when trying to remove unix socket file %q: %s", addr, err) + return fmt.Errorf("unexpected error when trying to remove unix socket file %q: %w", addr, err) } ln, err := net.Listen("unix", addr) if err != nil { return err } if err = os.Chmod(addr, mode); err != nil { - return fmt.Errorf("cannot chmod %#o for %q: %s", mode, addr, err) + return fmt.Errorf("cannot chmod %#o for %q: %w", mode, addr, err) } return s.Serve(ln) } @@ -1612,19 +1687,19 @@ func (s *Server) ServeTLS(ln net.Listener, certFile, keyFile string) error { s.mu.Unlock() return err } - if s.tlsConfig == nil { + if s.TLSConfig == nil { s.mu.Unlock() return errNoCertOrKeyProvided } // BuildNameToCertificate has been deprecated since 1.14. // But since we also support older versions we'll keep this here. - s.tlsConfig.BuildNameToCertificate() //nolint:staticcheck + s.TLSConfig.BuildNameToCertificate() //nolint:staticcheck s.mu.Unlock() return s.Serve( - tls.NewListener(ln, s.tlsConfig), + tls.NewListener(ln, s.TLSConfig.Clone()), ) } @@ -1642,19 +1717,19 @@ func (s *Server) ServeTLSEmbed(ln net.Listener, certData, keyData []byte) error s.mu.Unlock() return err } - if s.tlsConfig == nil { + if s.TLSConfig == nil { s.mu.Unlock() return errNoCertOrKeyProvided } // BuildNameToCertificate has been deprecated since 1.14. // But since we also support older versions we'll keep this here. 
- s.tlsConfig.BuildNameToCertificate() //nolint:staticcheck + s.TLSConfig.BuildNameToCertificate() //nolint:staticcheck s.mu.Unlock() return s.Serve( - tls.NewListener(ln, s.tlsConfig), + tls.NewListener(ln, s.TLSConfig.Clone()), ) } @@ -1669,12 +1744,12 @@ func (s *Server) AppendCert(certFile, keyFile string) error { cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { - return fmt.Errorf("cannot load TLS key pair from certFile=%q and keyFile=%q: %s", certFile, keyFile, err) + return fmt.Errorf("cannot load TLS key pair from certFile=%q and keyFile=%q: %w", certFile, keyFile, err) } s.configTLS() + s.TLSConfig.Certificates = append(s.TLSConfig.Certificates, cert) - s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert) return nil } @@ -1691,16 +1766,14 @@ func (s *Server) AppendCertEmbed(certData, keyData []byte) error { } s.configTLS() + s.TLSConfig.Certificates = append(s.TLSConfig.Certificates, cert) - s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert) return nil } func (s *Server) configTLS() { - if s.tlsConfig == nil { - s.tlsConfig = &tls.Config{ - PreferServerCipherSuites: true, - } + if s.TLSConfig == nil { + s.TLSConfig = &tls.Config{} } } @@ -1733,11 +1806,12 @@ func (s *Server) Serve(ln net.Listener) error { s.mu.Unlock() wp := &workerPool{ - WorkerFunc: s.serveConn, - MaxWorkersCount: maxWorkersCount, - LogAllErrors: s.LogAllErrors, - Logger: s.logger(), - connState: s.setState, + WorkerFunc: s.serveConn, + MaxWorkersCount: maxWorkersCount, + LogAllErrors: s.LogAllErrors, + MaxIdleWorkerDuration: s.MaxIdleWorkerDuration, + Logger: s.logger(), + connState: s.setState, } wp.Start() @@ -1814,6 +1888,8 @@ func (s *Server) Shutdown() error { close(s.done) } + s.closeIdleConns() + // Closing the listener will make Serve() call Stop on the worker pool. // Setting .stop to 1 will make serveConn() break out of its loop. // Now we just have to wait until all workers are done. @@ -2053,7 +2129,6 @@ func (s *Server) serveConn(c net.Conn) (err error) { connectionClose bool isHTTP11 bool - reqReset bool continueReadingRequest bool = true ) for { @@ -2273,11 +2348,9 @@ func (s *Server) serveConn(c net.Conn) (err error) { timeoutResponse.CopyTo(&ctx.Response) } - if !ctx.IsGet() && ctx.IsHead() { + if ctx.IsHead() { ctx.Response.SkipBody = true } - reqReset = true - ctx.Request.Reset() hijackHandler = ctx.hijackHandler ctx.hijackHandler = nil @@ -2301,7 +2374,6 @@ func (s *Server) serveConn(c net.Conn) (err error) { previousWriteTimeout = 0 } - connectionClose = connectionClose || ctx.Response.ConnectionClose() connectionClose = connectionClose || ctx.Response.ConnectionClose() || (s.CloseOnShutdown && atomic.LoadInt32(&s.stop) == 1) if connectionClose { ctx.Response.Header.SetCanonical(strConnection, strClose) @@ -2349,9 +2421,6 @@ func (s *Server) serveConn(c net.Conn) (err error) { if br != nil { hjr = br br = nil - - // br may point to ctx.fbr, so do not return ctx into pool below. 
- ctx = nil } if bw != nil { err = bw.Flush() @@ -2365,7 +2434,7 @@ func (s *Server) serveConn(c net.Conn) (err error) { if err != nil { break } - go hijackConnHandler(hjr, c, s, hijackHandler) + go hijackConnHandler(ctx, hjr, c, s, hijackHandler) err = errHijacked break } @@ -2378,6 +2447,8 @@ func (s *Server) serveConn(c net.Conn) (err error) { s.setState(c, StateIdle) ctx.userValues.Reset() + ctx.Request.Reset() + ctx.Response.Reset() if atomic.LoadInt32(&s.stop) == 1 { err = nil @@ -2391,25 +2462,21 @@ func (s *Server) serveConn(c net.Conn) (err error) { if bw != nil { releaseWriter(s, bw) } - if ctx != nil { - // in unexpected cases the for loop will break - // before request reset call. in such cases, call it before - // release to fix #548 - if !reqReset { - ctx.Request.Reset() - } + if hijackHandler == nil { s.releaseCtx(ctx) } + return } func (s *Server) setState(nc net.Conn, state ConnState) { + s.trackConn(nc, state) if hook := s.ConnState; hook != nil { hook(nc, state) } } -func hijackConnHandler(r io.Reader, c net.Conn, s *Server, h HijackHandler) { +func hijackConnHandler(ctx *RequestCtx, r io.Reader, c net.Conn, s *Server, h HijackHandler) { hjc := s.acquireHijackConn(r, c) h(hjc) @@ -2420,6 +2487,7 @@ func hijackConnHandler(r io.Reader, c net.Conn, s *Server, h HijackHandler) { c.Close() s.releaseHijackConn(hjc) } + s.releaseCtx(ctx) } func (s *Server) acquireHijackConn(r io.Reader, c net.Conn) *hijackConn { @@ -2483,7 +2551,7 @@ func writeResponse(ctx *RequestCtx, w *bufio.Writer) error { panic("BUG: cannot write timed out response") } err := ctx.Response.Write(w) - ctx.Response.Reset() + return err } @@ -2564,17 +2632,19 @@ func releaseWriter(s *Server, w *bufio.Writer) { func (s *Server) acquireCtx(c net.Conn) (ctx *RequestCtx) { v := s.ctxPool.Get() if v == nil { - ctx = &RequestCtx{ - s: s, - } keepBodyBuffer := !s.ReduceMemoryUsage + + ctx = new(RequestCtx) ctx.Request.keepBodyBuffer = keepBodyBuffer ctx.Response.keepBodyBuffer = keepBodyBuffer } else { ctx = v.(*RequestCtx) } + + ctx.s = s ctx.c = c - return + + return ctx } // Init2 prepares ctx for passing to RequestHandler. @@ -2697,10 +2767,8 @@ func (s *Server) releaseCtx(ctx *RequestCtx) { if ctx.timeoutResponse != nil { panic("BUG: cannot release timed out RequestCtx") } - ctx.c = nil - ctx.remoteAddr = nil - ctx.fbr.c = nil - ctx.userValues.Reset() + + ctx.reset() s.ctxPool.Put(ctx) } @@ -2720,7 +2788,7 @@ func (s *Server) getServerName() []byte { } func (s *Server) writeFastError(w io.Writer, statusCode int, msg string) { - w.Write(statusLine(statusCode)) //nolint:errcheck + w.Write(formatStatusLine(nil, strHTTP11, statusCode, s2b(StatusMessage(statusCode)))) //nolint:errcheck server := "" if !s.NoDefaultServerHeader { @@ -2768,11 +2836,38 @@ func (s *Server) writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, serverNam if bw == nil { bw = acquireWriter(ctx) } + writeResponse(ctx, bw) //nolint:errcheck + ctx.Response.Reset() bw.Flush() + return bw } +func (s *Server) trackConn(c net.Conn, state ConnState) { + s.idleConnsMu.Lock() + switch state { + case StateIdle: + if s.idleConns == nil { + s.idleConns = make(map[net.Conn]struct{}) + } + s.idleConns[c] = struct{}{} + + default: + delete(s.idleConns, c) + } + s.idleConnsMu.Unlock() +} + +func (s *Server) closeIdleConns() { + s.idleConnsMu.Lock() + for c := range s.idleConns { + _ = c.Close() + } + s.idleConns = nil + s.idleConnsMu.Unlock() +} + // A ConnState represents the state of a client connection to a server. 
// It's used by the optional Server.ConnState hook. type ConnState int diff --git a/vendor/github.com/valyala/fasthttp/status.go b/vendor/github.com/valyala/fasthttp/status.go index 28d1286..c88ba11 100644 --- a/vendor/github.com/valyala/fasthttp/status.go +++ b/vendor/github.com/valyala/fasthttp/status.go @@ -1,7 +1,6 @@ package fasthttp import ( - "fmt" "strconv" ) @@ -81,7 +80,7 @@ const ( ) var ( - statusLines = make([][]byte, statusMessageMax+1) + unknownStatusCode = "Unknown Status Code" statusMessages = []string{ StatusContinue: "Continue", @@ -155,39 +154,24 @@ var ( // StatusMessage returns HTTP status message for the given status code. func StatusMessage(statusCode int) string { if statusCode < statusMessageMin || statusCode > statusMessageMax { - return "Unknown Status Code" + return unknownStatusCode } - s := statusMessages[statusCode] - if s == "" { - s = "Unknown Status Code" + if s := statusMessages[statusCode]; s != "" { + return s } - return s + return unknownStatusCode } -func init() { - // Fill all valid status lines - for i := 0; i < len(statusLines); i++ { - statusLines[i] = []byte(fmt.Sprintf("HTTP/1.1 %d %s\r\n", i, StatusMessage(i))) +func formatStatusLine(dst []byte, protocol []byte, statusCode int, statusText []byte) []byte { + dst = append(dst, protocol...) + dst = append(dst, ' ') + dst = strconv.AppendInt(dst, int64(statusCode), 10) + dst = append(dst, ' ') + if len(statusText) == 0 { + dst = append(dst, s2b(StatusMessage(statusCode))...) + } else { + dst = append(dst, statusText...) } -} - -func statusLine(statusCode int) []byte { - if statusCode < 0 || statusCode > statusMessageMax { - return invalidStatusLine(statusCode) - } - - return statusLines[statusCode] -} - -func invalidStatusLine(statusCode int) []byte { - statusText := StatusMessage(statusCode) - // xxx placeholder of status code - var line = make([]byte, 0, len("HTTP/1.1 xxx \r\n")+len(statusText)) - line = append(line, "HTTP/1.1 "...) - line = strconv.AppendInt(line, int64(statusCode), 10) - line = append(line, ' ') - line = append(line, statusText...) - line = append(line, "\r\n"...) - return line + return append(dst, strCRLF...) 
} diff --git a/vendor/github.com/valyala/fasthttp/streaming.go b/vendor/github.com/valyala/fasthttp/streaming.go index 1a3d748..11750a9 100644 --- a/vendor/github.com/valyala/fasthttp/streaming.go +++ b/vendor/github.com/valyala/fasthttp/streaming.go @@ -10,10 +10,10 @@ import ( ) type requestStream struct { + header *RequestHeader prefetchedBytes *bytes.Reader reader *bufio.Reader totalBytesRead int - contentLength int chunkLeft int } @@ -22,18 +22,18 @@ func (rs *requestStream) Read(p []byte) (int, error) { n int err error ) - if rs.contentLength == -1 { + if rs.header.contentLength == -1 { if rs.chunkLeft == 0 { chunkSize, err := parseChunkSize(rs.reader) if err != nil { return 0, err } if chunkSize == 0 { - err = readCrLf(rs.reader) - if err == nil { - err = io.EOF + err = rs.header.ReadTrailer(rs.reader) + if err != nil && err != io.EOF { + return 0, err } - return 0, err + return 0, io.EOF } rs.chunkLeft = chunkSize } @@ -52,7 +52,7 @@ func (rs *requestStream) Read(p []byte) (int, error) { } return n, err } - if rs.totalBytesRead == rs.contentLength { + if rs.totalBytesRead == rs.header.contentLength { return 0, io.EOF } prefetchedSize := int(rs.prefetchedBytes.Size()) @@ -63,12 +63,12 @@ func (rs *requestStream) Read(p []byte) (int, error) { } n, err := rs.prefetchedBytes.Read(p) rs.totalBytesRead += n - if n == rs.contentLength { + if n == rs.header.contentLength { return n, io.EOF } return n, err } else { - left := rs.contentLength - rs.totalBytesRead + left := rs.header.contentLength - rs.totalBytesRead if len(p) > left { p = p[:left] } @@ -79,18 +79,17 @@ func (rs *requestStream) Read(p []byte) (int, error) { } } - if rs.totalBytesRead == rs.contentLength { + if rs.totalBytesRead == rs.header.contentLength { err = io.EOF } return n, err } -func acquireRequestStream(b *bytebufferpool.ByteBuffer, r *bufio.Reader, contentLength int) *requestStream { +func acquireRequestStream(b *bytebufferpool.ByteBuffer, r *bufio.Reader, h *RequestHeader) *requestStream { rs := requestStreamPool.Get().(*requestStream) rs.prefetchedBytes = bytes.NewReader(b.B) rs.reader = r - rs.contentLength = contentLength - + rs.header = h return rs } diff --git a/vendor/github.com/valyala/fasthttp/strings.go b/vendor/github.com/valyala/fasthttp/strings.go index 7f12cbc..370e307 100644 --- a/vendor/github.com/valyala/fasthttp/strings.go +++ b/vendor/github.com/valyala/fasthttp/strings.go @@ -7,44 +7,56 @@ var ( ) var ( - strSlash = []byte("/") - strSlashSlash = []byte("//") - strSlashDotDot = []byte("/..") - strSlashDotSlash = []byte("/./") - strSlashDotDotSlash = []byte("/../") - strCRLF = []byte("\r\n") - strHTTP = []byte("http") - strHTTPS = []byte("https") - strHTTP10 = []byte("HTTP/1.0") - strHTTP11 = []byte("HTTP/1.1") - strColon = []byte(":") - strColonSlashSlash = []byte("://") - strColonSpace = []byte(": ") - strGMT = []byte("GMT") + strSlash = []byte("/") + strSlashSlash = []byte("//") + strSlashDotDot = []byte("/..") + strSlashDotSlash = []byte("/./") + strSlashDotDotSlash = []byte("/../") + strBackSlashDotDot = []byte(`\..`) + strBackSlashDotBackSlash = []byte(`\.\`) + strSlashDotDotBackSlash = []byte(`/..\`) + strBackSlashDotDotBackSlash = []byte(`\..\`) + strCRLF = []byte("\r\n") + strHTTP = []byte("http") + strHTTPS = []byte("https") + strHTTP10 = []byte("HTTP/1.0") + strHTTP11 = []byte("HTTP/1.1") + strColon = []byte(":") + strColonSlashSlash = []byte("://") + strColonSpace = []byte(": ") + strCommaSpace = []byte(", ") + strGMT = []byte("GMT") strResponseContinue = []byte("HTTP/1.1 100 
Continue\r\n\r\n") - strExpect = []byte(HeaderExpect) - strConnection = []byte(HeaderConnection) - strContentLength = []byte(HeaderContentLength) - strContentType = []byte(HeaderContentType) - strDate = []byte(HeaderDate) - strHost = []byte(HeaderHost) - strReferer = []byte(HeaderReferer) - strServer = []byte(HeaderServer) - strTransferEncoding = []byte(HeaderTransferEncoding) - strContentEncoding = []byte(HeaderContentEncoding) - strAcceptEncoding = []byte(HeaderAcceptEncoding) - strUserAgent = []byte(HeaderUserAgent) - strCookie = []byte(HeaderCookie) - strSetCookie = []byte(HeaderSetCookie) - strLocation = []byte(HeaderLocation) - strIfModifiedSince = []byte(HeaderIfModifiedSince) - strLastModified = []byte(HeaderLastModified) - strAcceptRanges = []byte(HeaderAcceptRanges) - strRange = []byte(HeaderRange) - strContentRange = []byte(HeaderContentRange) - strAuthorization = []byte(HeaderAuthorization) + strExpect = []byte(HeaderExpect) + strConnection = []byte(HeaderConnection) + strContentLength = []byte(HeaderContentLength) + strContentType = []byte(HeaderContentType) + strDate = []byte(HeaderDate) + strHost = []byte(HeaderHost) + strReferer = []byte(HeaderReferer) + strServer = []byte(HeaderServer) + strTransferEncoding = []byte(HeaderTransferEncoding) + strContentEncoding = []byte(HeaderContentEncoding) + strAcceptEncoding = []byte(HeaderAcceptEncoding) + strUserAgent = []byte(HeaderUserAgent) + strCookie = []byte(HeaderCookie) + strSetCookie = []byte(HeaderSetCookie) + strLocation = []byte(HeaderLocation) + strIfModifiedSince = []byte(HeaderIfModifiedSince) + strLastModified = []byte(HeaderLastModified) + strAcceptRanges = []byte(HeaderAcceptRanges) + strRange = []byte(HeaderRange) + strContentRange = []byte(HeaderContentRange) + strAuthorization = []byte(HeaderAuthorization) + strTE = []byte(HeaderTE) + strTrailer = []byte(HeaderTrailer) + strMaxForwards = []byte(HeaderMaxForwards) + strProxyConnection = []byte(HeaderProxyConnection) + strProxyAuthenticate = []byte(HeaderProxyAuthenticate) + strProxyAuthorization = []byte(HeaderProxyAuthorization) + strWWWAuthenticate = []byte(HeaderWWWAuthenticate) strCookieExpires = []byte("expires") strCookieDomain = []byte("domain") @@ -73,10 +85,10 @@ var ( strBytes = []byte("bytes") strBasicSpace = []byte("Basic ") - strApplicationSlash = []byte("application/") - strImageSVG = []byte("image/svg") - strImageIcon = []byte("image/x-icon") - strFontSlash = []byte("font/") - strMultipartSlash = []byte("multipart/") - strTextSlash = []byte("text/") + strApplicationSlash = []byte("application/") + strImageSVG = []byte("image/svg") + strImageIcon = []byte("image/x-icon") + strFontSlash = []byte("font/") + strMultipartSlash = []byte("multipart/") + strTextSlash = []byte("text/") ) diff --git a/vendor/github.com/valyala/fasthttp/tcpdialer.go b/vendor/github.com/valyala/fasthttp/tcpdialer.go index 2317aca..294cf14 100644 --- a/vendor/github.com/valyala/fasthttp/tcpdialer.go +++ b/vendor/github.com/valyala/fasthttp/tcpdialer.go @@ -127,7 +127,7 @@ type Resolver interface { // TCPDialer contains options to control a group of Dial calls. type TCPDialer struct { - // Concurrency controls the maximum number of concurrent Dails + // Concurrency controls the maximum number of concurrent Dials // that can be performed using this object. // Setting this to 0 means unlimited. 
// @@ -156,8 +156,7 @@ type TCPDialer struct { // DNSCacheDuration may be used to override the default DNS cache duration (DefaultDNSCacheDuration) DNSCacheDuration time.Duration - tcpAddrsLock sync.Mutex - tcpAddrsMap map[string]*tcpAddrEntry + tcpAddrsMap sync.Map concurrencyCh chan struct{} @@ -280,7 +279,6 @@ func (d *TCPDialer) dial(addr string, dualStack bool, timeout time.Duration) (ne d.DNSCacheDuration = DefaultDNSCacheDuration } - d.tcpAddrsMap = make(map[string]*tcpAddrEntry) go d.tcpAddrsClean() }) @@ -360,8 +358,8 @@ type tcpAddrEntry struct { addrs []net.TCPAddr addrsIdx uint32 + pending int32 resolveTime time.Time - pending bool } // DefaultDNSCacheDuration is the duration for caching resolved TCP addresses @@ -373,35 +371,35 @@ func (d *TCPDialer) tcpAddrsClean() { for { time.Sleep(time.Second) t := time.Now() - - d.tcpAddrsLock.Lock() - for k, e := range d.tcpAddrsMap { - if t.Sub(e.resolveTime) > expireDuration { - delete(d.tcpAddrsMap, k) + d.tcpAddrsMap.Range(func(k, v interface{}) bool { + if e, ok := v.(*tcpAddrEntry); ok && t.Sub(e.resolveTime) > expireDuration { + d.tcpAddrsMap.Delete(k) } - } - d.tcpAddrsLock.Unlock() + return true + }) + } } func (d *TCPDialer) getTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, uint32, error) { - d.tcpAddrsLock.Lock() - e := d.tcpAddrsMap[addr] - if e != nil && !e.pending && time.Since(e.resolveTime) > d.DNSCacheDuration { - e.pending = true - e = nil + item, exist := d.tcpAddrsMap.Load(addr) + e, ok := item.(*tcpAddrEntry) + if exist && ok && e != nil && time.Since(e.resolveTime) > d.DNSCacheDuration { + // Only let one goroutine re-resolve at a time. + if atomic.SwapInt32(&e.pending, 1) == 0 { + e = nil + } } - d.tcpAddrsLock.Unlock() if e == nil { addrs, err := resolveTCPAddrs(addr, dualStack, d.Resolver) if err != nil { - d.tcpAddrsLock.Lock() - e = d.tcpAddrsMap[addr] - if e != nil && e.pending { - e.pending = false + item, exist := d.tcpAddrsMap.Load(addr) + e, ok = item.(*tcpAddrEntry) + if exist && ok && e != nil { + // Set pending to 0 so another goroutine can retry. + atomic.StoreInt32(&e.pending, 0) } - d.tcpAddrsLock.Unlock() return nil, 0, err } @@ -409,10 +407,7 @@ func (d *TCPDialer) getTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, uin addrs: addrs, resolveTime: time.Now(), } - - d.tcpAddrsLock.Lock() - d.tcpAddrsMap[addr] = e - d.tcpAddrsLock.Unlock() + d.tcpAddrsMap.Store(addr, e) } idx := atomic.AddUint32(&e.addrsIdx, 1) diff --git a/vendor/github.com/valyala/fasthttp/uri.go b/vendor/github.com/valyala/fasthttp/uri.go index b57b1f7..38a431e 100644 --- a/vendor/github.com/valyala/fasthttp/uri.go +++ b/vendor/github.com/valyala/fasthttp/uri.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" "io" + "path/filepath" + "strconv" "sync" ) @@ -69,14 +71,14 @@ type URI struct { // CopyTo copies uri contents to dst. func (u *URI) CopyTo(dst *URI) { dst.Reset() - dst.pathOriginal = append(dst.pathOriginal[:0], u.pathOriginal...) - dst.scheme = append(dst.scheme[:0], u.scheme...) - dst.path = append(dst.path[:0], u.path...) - dst.queryString = append(dst.queryString[:0], u.queryString...) - dst.hash = append(dst.hash[:0], u.hash...) - dst.host = append(dst.host[:0], u.host...) - dst.username = append(dst.username[:0], u.username...) - dst.password = append(dst.password[:0], u.password...) + dst.pathOriginal = append(dst.pathOriginal, u.pathOriginal...) + dst.scheme = append(dst.scheme, u.scheme...) + dst.path = append(dst.path, u.path...) + dst.queryString = append(dst.queryString, u.queryString...) 
+ dst.hash = append(dst.hash, u.hash...) + dst.host = append(dst.host, u.host...) + dst.username = append(dst.username, u.username...) + dst.password = append(dst.password, u.password...) u.queryArgs.CopyTo(&dst.queryArgs) dst.parsedQueryArgs = u.parsedQueryArgs @@ -88,7 +90,7 @@ func (u *URI) CopyTo(dst *URI) { // Hash returns URI hash, i.e. qwe of http://aaa.com/foo/bar?baz=123#qwe . // -// The returned value is valid until the next URI method call. +// The returned bytes are valid until the next URI method call. func (u *URI) Hash() []byte { return u.hash } @@ -104,6 +106,8 @@ func (u *URI) SetHashBytes(hash []byte) { } // Username returns URI username +// +// The returned bytes are valid until the next URI method call. func (u *URI) Username() []byte { return u.username } @@ -119,6 +123,8 @@ func (u *URI) SetUsernameBytes(username []byte) { } // Password returns URI password +// +// The returned bytes are valid until the next URI method call. func (u *URI) Password() []byte { return u.password } @@ -136,7 +142,7 @@ func (u *URI) SetPasswordBytes(password []byte) { // QueryString returns URI query string, // i.e. baz=123 of http://aaa.com/foo/bar?baz=123#qwe . // -// The returned value is valid until the next URI method call. +// The returned bytes are valid until the next URI method call. func (u *URI) QueryString() []byte { return u.queryString } @@ -158,7 +164,7 @@ func (u *URI) SetQueryStringBytes(queryString []byte) { // The returned path is always urldecoded and normalized, // i.e. '//f%20obar/baz/../zzz' becomes '/f obar/zzz'. // -// The returned value is valid until the next URI method call. +// The returned bytes are valid until the next URI method call. func (u *URI) Path() []byte { path := u.path if len(path) == 0 { @@ -181,7 +187,7 @@ func (u *URI) SetPathBytes(path []byte) { // PathOriginal returns the original path from requestURI passed to URI.Parse(). // -// The returned value is valid until the next URI method call. +// The returned bytes are valid until the next URI method call. func (u *URI) PathOriginal() []byte { return u.pathOriginal } @@ -190,7 +196,7 @@ func (u *URI) PathOriginal() []byte { // // Returned scheme is always lowercased. // -// The returned value is valid until the next URI method call. +// The returned bytes are valid until the next URI method call. func (u *URI) Scheme() []byte { scheme := u.scheme if len(scheme) == 0 { @@ -211,6 +217,14 @@ func (u *URI) SetSchemeBytes(scheme []byte) { lowercaseBytes(u.scheme) } +func (u *URI) isHttps() bool { + return bytes.Equal(u.scheme, strHTTPS) +} + +func (u *URI) isHttp() bool { + return len(u.scheme) == 0 || bytes.Equal(u.scheme, strHTTP) +} + // Reset clears uri. func (u *URI) Reset() { u.pathOriginal = u.pathOriginal[:0] @@ -236,6 +250,8 @@ func (u *URI) Reset() { // Host returns host part, i.e. aaa.com of http://aaa.com/foo/bar?baz=123#qwe . // // Host is always lowercased. +// +// The returned bytes are valid until the next URI method call. func (u *URI) Host() []byte { return u.host } @@ -275,14 +291,13 @@ func (u *URI) parse(host, uri []byte, isTLS bool) error { if len(host) == 0 || bytes.Contains(uri, strColonSlashSlash) { scheme, newHost, newURI := splitHostURI(host, uri) - u.scheme = append(u.scheme, scheme...) - lowercaseBytes(u.scheme) + u.SetSchemeBytes(scheme) host = newHost uri = newURI } if isTLS { - u.scheme = append(u.scheme[:0], strHTTPS...) 
+ u.SetSchemeBytes(strHTTPS) } if n := bytes.IndexByte(host, '@'); n >= 0 { @@ -299,6 +314,11 @@ func (u *URI) parse(host, uri []byte, isTLS bool) error { } u.host = append(u.host, host...) + if parsedHost, err := parseHost(u.host); err != nil { + return err + } else { + u.host = parsedHost + } lowercaseBytes(u.host) b := uri @@ -338,6 +358,226 @@ func (u *URI) parse(host, uri []byte, isTLS bool) error { return nil } +// parseHost parses host as an authority without user +// information. That is, as host[:port]. +// +// Based on https://github.com/golang/go/blob/8ac5cbe05d61df0a7a7c9a38ff33305d4dcfea32/src/net/url/url.go#L619 +// +// The host is parsed and unescaped in place overwriting the contents of the host parameter. +func parseHost(host []byte) ([]byte, error) { + if len(host) > 0 && host[0] == '[' { + // Parse an IP-Literal in RFC 3986 and RFC 6874. + // E.g., "[fe80::1]", "[fe80::1%25en0]", "[fe80::1]:80". + i := bytes.LastIndexByte(host, ']') + if i < 0 { + return nil, errors.New("missing ']' in host") + } + colonPort := host[i+1:] + if !validOptionalPort(colonPort) { + return nil, fmt.Errorf("invalid port %q after host", colonPort) + } + + // RFC 6874 defines that %25 (%-encoded percent) introduces + // the zone identifier, and the zone identifier can use basically + // any %-encoding it likes. That's different from the host, which + // can only %-encode non-ASCII bytes. + // We do impose some restrictions on the zone, to avoid stupidity + // like newlines. + zone := bytes.Index(host[:i], []byte("%25")) + if zone >= 0 { + host1, err := unescape(host[:zone], encodeHost) + if err != nil { + return nil, err + } + host2, err := unescape(host[zone:i], encodeZone) + if err != nil { + return nil, err + } + host3, err := unescape(host[i:], encodeHost) + if err != nil { + return nil, err + } + return append(host1, append(host2, host3...)...), nil + } + } else if i := bytes.LastIndexByte(host, ':'); i != -1 { + colonPort := host[i:] + if !validOptionalPort(colonPort) { + return nil, fmt.Errorf("invalid port %q after host", colonPort) + } + } + + var err error + if host, err = unescape(host, encodeHost); err != nil { + return nil, err + } + return host, nil +} + +type encoding int + +const ( + encodeHost encoding = 1 + iota + encodeZone +) + +type EscapeError string + +func (e EscapeError) Error() string { + return "invalid URL escape " + strconv.Quote(string(e)) +} + +type InvalidHostError string + +func (e InvalidHostError) Error() string { + return "invalid character " + strconv.Quote(string(e)) + " in host name" +} + +// unescape unescapes a string; the mode specifies +// which section of the URL string is being unescaped. +// +// Based on https://github.com/golang/go/blob/8ac5cbe05d61df0a7a7c9a38ff33305d4dcfea32/src/net/url/url.go#L199 +// +// Unescapes in place overwriting the contents of s and returning it. +func unescape(s []byte, mode encoding) ([]byte, error) { + // Count %, check that they're well-formed. + n := 0 + for i := 0; i < len(s); { + switch s[i] { + case '%': + n++ + if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) { + s = s[i:] + if len(s) > 3 { + s = s[:3] + } + return nil, EscapeError(s) + } + // Per https://tools.ietf.org/html/rfc3986#page-21 + // in the host component %-encoding can only be used + // for non-ASCII bytes. + // But https://tools.ietf.org/html/rfc6874#section-2 + // introduces %25 being allowed to escape a percent sign + // in IPv6 scoped-address literals. Yay. 
+ if mode == encodeHost && unhex(s[i+1]) < 8 && !bytes.Equal(s[i:i+3], []byte("%25")) { + return nil, EscapeError(s[i : i+3]) + } + if mode == encodeZone { + // RFC 6874 says basically "anything goes" for zone identifiers + // and that even non-ASCII can be redundantly escaped, + // but it seems prudent to restrict %-escaped bytes here to those + // that are valid host name bytes in their unescaped form. + // That is, you can use escaping in the zone identifier but not + // to introduce bytes you couldn't just write directly. + // But Windows puts spaces here! Yay. + v := unhex(s[i+1])<<4 | unhex(s[i+2]) + if !bytes.Equal(s[i:i+3], []byte("%25")) && v != ' ' && shouldEscape(v, encodeHost) { + return nil, EscapeError(s[i : i+3]) + } + } + i += 3 + default: + if (mode == encodeHost || mode == encodeZone) && s[i] < 0x80 && shouldEscape(s[i], mode) { + return nil, InvalidHostError(s[i : i+1]) + } + i++ + } + } + + if n == 0 { + return s, nil + } + + t := s[:0] + for i := 0; i < len(s); i++ { + switch s[i] { + case '%': + t = append(t, unhex(s[i+1])<<4|unhex(s[i+2])) + i += 2 + default: + t = append(t, s[i]) + } + } + return t, nil +} + +// Return true if the specified character should be escaped when +// appearing in a URL string, according to RFC 3986. +// +// Please be informed that for now shouldEscape does not check all +// reserved characters correctly. See golang.org/issue/5684. +// +// Based on https://github.com/golang/go/blob/8ac5cbe05d61df0a7a7c9a38ff33305d4dcfea32/src/net/url/url.go#L100 +func shouldEscape(c byte, mode encoding) bool { + // §2.3 Unreserved characters (alphanum) + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + + if mode == encodeHost || mode == encodeZone { + // §3.2.2 Host allows + // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" + // as part of reg-name. + // We add : because we include :port as part of host. + // We add [ ] because we include [ipv6]:port as part of host. + // We add < > because they're the only characters left that + // we could possibly allow, and Parse will reject them if we + // escape them (because hosts can't use %-encoding for + // ASCII bytes). + switch c { + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '[', ']', '<', '>', '"': + return false + } + } + + if c == '-' || c == '_' || c == '.' || c == '~' { // §2.3 Unreserved characters (mark) + return false + } + + // Everything else must be escaped. 
+ return true +} + +func ishex(c byte) bool { + switch { + case '0' <= c && c <= '9': + return true + case 'a' <= c && c <= 'f': + return true + case 'A' <= c && c <= 'F': + return true + } + return false +} + +func unhex(c byte) byte { + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 0 +} + +// validOptionalPort reports whether port is either an empty string +// or matches /^:\d*$/ +func validOptionalPort(port []byte) bool { + if len(port) == 0 { + return true + } + if port[0] != ':' { + return false + } + for _, b := range port[1:] { + if b < '0' || b > '9' { + return false + } + } + return true +} + func normalizePath(dst, src []byte) []byte { dst = dst[:0] dst = addLeadingSlash(dst, src) @@ -395,6 +635,60 @@ func normalizePath(dst, src []byte) []byte { b = b[:nn+1] } + if filepath.Separator == '\\' { + // remove \.\ parts + b = dst + for { + n := bytes.Index(b, strBackSlashDotBackSlash) + if n < 0 { + break + } + nn := n + len(strSlashDotSlash) - 1 + copy(b[n:], b[nn:]) + b = b[:len(b)-nn+n] + } + + // remove /foo/..\ parts + for { + n := bytes.Index(b, strSlashDotDotBackSlash) + if n < 0 { + break + } + nn := bytes.LastIndexByte(b[:n], '/') + if nn < 0 { + nn = 0 + } + n += len(strSlashDotDotBackSlash) - 1 + copy(b[nn:], b[n:]) + b = b[:len(b)-n+nn] + } + + // remove /foo\..\ parts + for { + n := bytes.Index(b, strBackSlashDotDotBackSlash) + if n < 0 { + break + } + nn := bytes.LastIndexByte(b[:n], '/') + if nn < 0 { + nn = 0 + } + n += len(strBackSlashDotDotBackSlash) - 1 + copy(b[nn:], b[n:]) + b = b[:len(b)-n+nn] + } + + // remove trailing \foo\.. + n := bytes.LastIndex(b, strBackSlashDotDot) + if n >= 0 && n+len(strSlashDotDot) == len(b) { + nn := bytes.LastIndexByte(b[:n], '/') + if nn < 0 { + return append(dst[:0], strSlash...) + } + b = b[:nn+1] + } + } + return b } @@ -424,6 +718,8 @@ func (u *URI) RequestURI() []byte { // * For /foo/bar/baz.html path returns baz.html. // * For /foo/bar/ returns empty byte slice. // * For /foobar.js returns foobar.js. +// +// The returned bytes are valid until the next URI method call. func (u *URI) LastPathSegment() []byte { path := u.Path() n := bytes.LastIndexByte(path, '/') @@ -525,6 +821,8 @@ func (u *URI) updateBytes(newURI, buf []byte) []byte { } // FullURI returns full uri in the form {Scheme}://{Host}{RequestURI}#{Hash}. +// +// The returned bytes are valid until the next URI method call. func (u *URI) FullURI() []byte { u.fullURI = u.AppendBytes(u.fullURI[:0]) return u.fullURI @@ -591,6 +889,8 @@ func splitHostURI(host, uri []byte) ([]byte, []byte, []byte) { } // QueryArgs returns query args. +// +// The returned args are valid until the next URI method call. 
func (u *URI) QueryArgs() *Args { u.parseQueryArgs() return &u.queryArgs diff --git a/vendor/github.com/valyala/fasthttp/userdata.go b/vendor/github.com/valyala/fasthttp/userdata.go index bd3e28a..9a7c988 100644 --- a/vendor/github.com/valyala/fasthttp/userdata.go +++ b/vendor/github.com/valyala/fasthttp/userdata.go @@ -22,6 +22,10 @@ func (d *userData) Set(key string, value interface{}) { } } + if value == nil { + return + } + c := cap(args) if c > n { args = args[:n+1] @@ -69,3 +73,23 @@ func (d *userData) Reset() { } *d = (*d)[:0] } + +func (d *userData) Remove(key string) { + args := *d + n := len(args) + for i := 0; i < n; i++ { + kv := &args[i] + if string(kv.key) == key { + n-- + args[i] = args[n] + args[n].value = nil + args = args[:n] + *d = args + return + } + } +} + +func (d *userData) RemoveBytes(key []byte) { + d.Remove(b2s(key)) +} diff --git a/vendor/github.com/valyala/fasthttp/workerpool.go b/vendor/github.com/valyala/fasthttp/workerpool.go index 9b1987e..f1a9a4c 100644 --- a/vendor/github.com/valyala/fasthttp/workerpool.go +++ b/vendor/github.com/valyala/fasthttp/workerpool.go @@ -1,6 +1,7 @@ package fasthttp import ( + "errors" "net" "runtime" "strings" @@ -226,7 +227,8 @@ func (wp *workerPool) workerFunc(ch *workerChan) { strings.Contains(errStr, "reset by peer") || strings.Contains(errStr, "request headers: small read buffer") || strings.Contains(errStr, "unexpected EOF") || - strings.Contains(errStr, "i/o timeout")) { + strings.Contains(errStr, "i/o timeout") || + errors.Is(err, ErrBadTrailer)) { wp.Logger.Printf("error when serving connection %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err) } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 342ac2a..5940666 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,12 +1,12 @@ -# github.com/andybalholm/brotli v1.0.3 +# github.com/andybalholm/brotli v1.0.4 github.com/andybalholm/brotli -# github.com/klauspost/compress v1.13.5 +# github.com/klauspost/compress v1.15.0 github.com/klauspost/compress/flate github.com/klauspost/compress/gzip github.com/klauspost/compress/zlib # github.com/valyala/bytebufferpool v1.0.0 github.com/valyala/bytebufferpool -# github.com/valyala/fasthttp v1.30.0 +# github.com/valyala/fasthttp v1.34.0 github.com/valyala/fasthttp github.com/valyala/fasthttp/fasthttputil github.com/valyala/fasthttp/stackless
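The following is not part of the vendored patch; it is a minimal standalone sketch of the host/port validation that the parseHost and validOptionalPort helpers introduced above perform. The validOptionalPort helper mirrors the vendored code; the sample host strings and the program around them are illustrative only.

package main

import (
	"bytes"
	"fmt"
)

// validOptionalPort mirrors the helper added to the vendored uri.go:
// a port component is valid when it is empty or a ':' followed only by digits.
func validOptionalPort(port []byte) bool {
	if len(port) == 0 {
		return true
	}
	if port[0] != ':' {
		return false
	}
	for _, b := range port[1:] {
		if b < '0' || b > '9' {
			return false
		}
	}
	return true
}

func main() {
	hosts := []string{"example.com:8080", "example.com:8o80", "[fe80::1]:443", "[fe80::1]"}
	for _, host := range hosts {
		h := []byte(host)
		var colonPort []byte
		if h[0] == '[' {
			// For an IP-literal the port starts after the closing ']',
			// which is how parseHost above locates it.
			if i := bytes.LastIndexByte(h, ']'); i >= 0 {
				colonPort = h[i+1:]
			}
		} else if i := bytes.LastIndexByte(h, ':'); i >= 0 {
			// Otherwise everything from the last ':' is treated as the port.
			colonPort = h[i:]
		}
		fmt.Printf("%-20s port ok: %v\n", host, validOptionalPort(colonPort))
	}
}

With the new check wired into URI.parse, a host such as "example.com:8o80" now fails parsing with an "invalid port" error instead of being passed through with a malformed port.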
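Also illustrative rather than part of the patch: the new userData.Remove above deletes a key by copying the last entry over the matched slot, clearing the value left behind in the tail slot so the backing array does not keep an extra reference, and shrinking the slice. Below is a self-contained sketch of that pattern, with a hypothetical kv type standing in for the unexported one in fasthttp.

package main

import "fmt"

type kv struct {
	key   string
	value interface{}
}

// removeKV mirrors the swap-delete used by userData.Remove: overwrite the
// matched slot with the last element, clear the last slot's value, and
// shrink the slice. Element order is not preserved.
func removeKV(d []kv, key string) []kv {
	n := len(d)
	for i := 0; i < n; i++ {
		if d[i].key == key {
			n--
			d[i] = d[n]
			d[n].value = nil
			return d[:n]
		}
	}
	return d
}

func main() {
	d := []kv{{"a", 1}, {"b", 2}, {"c", 3}}
	d = removeKV(d, "a")
	fmt.Println(d) // [{c 3} {b 2}]
}

In application code this is presumably reached through RequestCtx when removing a previously set user value; the sketch stays at the slice level to avoid assuming the exported method names.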