gtsocial-umbx


level1.go (5888B)


package flate

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// fastGen maintains the table for matches,
// and the previous byte block for level 1.
// This is the generic implementation.
type fastEncL1 struct {
	fastGen
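	// table holds candidate match positions. Offsets are stored as
	// position+e.cur, so they stay valid as blocks accumulate in e.hist.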
	table [tableSize]tableEntry
}

// Encode uses an algorithm similar to Snappy's to produce tokens for src.
func (e *fastEncL1) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashBytes              = 5
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
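		// Entries at or below minOff are already more than maxMatchOffset
		// bytes behind the end of the history and can never match again,
		// so they are cleared; the rest are rebased to the new e.cur.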
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by the caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src: continue on the full history buffer.
	src = e.hist

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

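	// load6432 reads 8 bytes little-endian starting at s; hashes use the
	// low hashBytes (5) bytes of this value, and the 4-byte match checks
	// compare uint32(cv) against load3232 of the candidate position.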
	cv := load6432(src, s)

	for {
		const skipLog = 5
		const doEvery = 2

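		// Skipping heuristic: the step to the next candidate is doEvery
		// plus one extra byte for every 32 (1<<skipLog) literal bytes since
		// the last emit, so incompressible input is scanned with bounded work.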
		nextS := s
		var candidate tableEntry
		for {
			nextHash := hashLen(cv, tableBits, hashBytes)
			candidate = e.table[nextHash]
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}

			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hashLen(now, tableBits, hashBytes)

			offset := s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}

			// Do one right away...
			cv = now
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}

			offset = s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}
			cv = now
			s = nextS
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset - e.cur
			l := int32(4)
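			// The dead `if false` branch keeps the readable matchlenLong
			// form for reference; the live branch is its manual inlining.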
			if false {
				l = e.matchlenLong(s+4, t+4, src) + 4
			} else {
				// inlined:
				a := src[s+4:]
				b := src[t+4:]
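				// Compare 8 bytes at a time. XOR of equal words is zero;
				// on a mismatch, TrailingZeros64(diff)>>3 counts how many
				// low-order (little-endian: earliest) bytes still matched.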
				for len(a) >= 8 {
					if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
						l += int32(bits.TrailingZeros64(diff) >> 3)
						break
					}
					l += 8
					a = a[8:]
					b = b[8:]
				}
				if len(a) < 8 {
					b = b[:len(a)]
					for i := range a {
						if a[i] != b[i] {
							break
						}
						l++
					}
				}
			}

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
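					// Inlined emitLiteral: each byte becomes a literal token,
					// and litHist collects the statistics later used to build
					// the Huffman tables.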
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			// Save the match found
			if false {
				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			} else {
				// Inlined...
				xoffset := uint32(s - t - baseMatchOffset)
				xlength := l
				oc := offsetCode(xoffset)
				xoffset |= oc << 16
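				// Deflate limits a single match to 258 bytes, so longer
				// matches are split into several tokens. Taking
				// 258-baseMatchLength when the remainder would be small
				// keeps the final piece at least baseMatchLength bytes long.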
				for xlength > 0 {
					xl := xlength
					if xl > 258 {
						if xl > 258+baseMatchLength {
							xl = 258
						} else {
							xl = 258 - baseMatchLength
						}
					}
					xlength -= xl
					xl -= baseMatchLength
					dst.extraHist[lengthCodes1[uint8(xl)]]++
					dst.offHist[oc]++
					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
					dst.n++
				}
			}
			s += l
			nextEmit = s
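			// Ensure forward progress: if the match ended at or before
			// nextS, resume scanning just past it.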
			if nextS >= s {
				s = nextS + 1
			}
			if s >= sLimit {
				// Index first pair after match end.
				if int(s+l+8) < len(src) {
					cv := load6432(src, s)
					e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hashLen(x, tableBits, hashBytes)
			e.table[prevHash] = tableEntry{offset: o}
			x >>= 16
			currHash := hashLen(x, tableBits, hashBytes)
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}

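			// If the candidate at s also gives a 4-byte match, loop to emit
			// another match immediately; otherwise break back to scanning
			// from s+1.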
			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
				cv = x >> 8
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}