mirror of https://gitee.com/johng/gf.git, synced 2024-12-03 04:37:49 +08:00

updates kafka dependencies

This commit is contained in:
parent 7ad4f61564
commit 783c0ba846
30  third/github.com/DataDog/zstd/.travis.yml  Normal file
@@ -0,0 +1,30 @@
language: go

go:
  - 1.9.x
  - 1.10.x
  - 1.11.x

os:
  - linux
  - osx

matrix:
  include:
    - name: "Go 1.11.x CentOS 32bits"
      language: go
      go: 1.11.x
      os: linux
      services:
        - docker
      script:
        # Please update Go version in travis_test_32 as needed
        - "docker run -i -v \"${PWD}:/zstd\" toopher/centos-i386:centos6 /bin/bash -c \"linux32 --32bit i386 /zstd/travis_test_32.sh\""

install:
  - "wget https://github.com/DataDog/zstd/files/2246767/mr.zip"
  - "unzip mr.zip"
script:
  - "go build"
  - "PAYLOAD=`pwd`/mr go test -v"
  - "PAYLOAD=`pwd`/mr go test -bench ."
27  third/github.com/DataDog/zstd/LICENSE  Normal file
@@ -0,0 +1,27 @@
Simplified BSD License

Copyright (c) 2016, Datadog <info@datadoghq.com>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
 * Neither the name of the copyright holder nor the names of its contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
120  third/github.com/DataDog/zstd/README.md  Normal file
@@ -0,0 +1,120 @@
# Zstd Go Wrapper

[C Zstd Homepage](https://github.com/Cyan4973/zstd)

The current headers and C files are from *v1.3.4* (Commit
[2555975](https://github.com/facebook/zstd/releases/tag/v1.3.4)).

## Usage

There are two main APIs:

* simple Compress/Decompress
* streaming API (io.Reader/io.Writer)

The compress/decompress APIs mirror those of lz4, while the streaming API was
designed to be a drop-in replacement for zlib.

### Simple `Compress/Decompress`

```go
// Compress compresses the byte array given in src and writes it to dst.
// If you already have a buffer allocated, you can pass it to prevent allocation.
// If not, you can pass nil as dst.
// If the buffer is too small, it will be reallocated, resized, and returned by the function.
// If dst is nil, this will allocate the worst-case size (CompressBound(src)).
Compress(dst, src []byte) ([]byte, error)
```

```go
// CompressLevel is the same as Compress but you can pass another compression level.
CompressLevel(dst, src []byte, level int) ([]byte, error)
```

```go
// Decompress will decompress your payload into dst.
// If you already have a buffer allocated, you can pass it to prevent allocation.
// If not, you can pass nil as dst (allocates a buffer of 4*len(src) by default).
// If the buffer is too small, it will retry 3 times by doubling the dst size.
// After max retries, it will switch to the slower stream API to be sure to be able
// to decompress. Currently switches if compression ratio > 4*2**3 = 32.
Decompress(dst, src []byte) ([]byte, error)
```
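
For orientation, here is a minimal round-trip sketch of the simple API. It assumes the package is imported as `github.com/DataDog/zstd` (the `zstd` package name is visible in the Go sources below); the payload and variable names are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/DataDog/zstd"
)

func main() {
	payload := []byte("hello hello hello zstd") // illustrative input

	// Passing nil as dst lets Compress allocate CompressBound(len(src)) itself.
	compressed, err := zstd.Compress(nil, payload)
	if err != nil {
		log.Fatal(err)
	}

	// nil dst again: the wrapper sizes (and, if needed, regrows) the buffer.
	restored, err := zstd.Decompress(nil, compressed)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes -> %d bytes -> %q\n", len(payload), len(compressed), restored)
}
```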

### Stream API

```go
// NewWriter creates a new object that can optionally be initialized with
// a precomputed dictionary. If dict is nil, compress without a dictionary.
// The dictionary array should not be changed during the use of this object.
// You MUST CALL Close() to write the last bytes of a zstd stream and free C objects.
NewWriter(w io.Writer) *Writer
NewWriterLevel(w io.Writer, level int) *Writer
NewWriterLevelDict(w io.Writer, level int, dict []byte) *Writer

// Write compresses the input data and writes it to the underlying writer.
(w *Writer) Write(p []byte) (int, error)

// Close flushes the buffer and frees C zstd objects.
(w *Writer) Close() error
```

```go
// NewReader returns a new io.ReadCloser that will decompress data from the
// underlying reader. If a dictionary is provided to NewReaderDict, it must
// not be modified until Close is called. It is the caller's responsibility
// to call Close, which frees up C objects.
NewReader(r io.Reader) io.ReadCloser
NewReaderDict(r io.Reader, dict []byte) io.ReadCloser
```
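
A minimal streaming sketch under the same import assumption; it compresses into an in-memory buffer and decompresses back, calling `Close` on both ends as the comments above require.

```go
package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/DataDog/zstd"
)

func main() {
	var buf bytes.Buffer

	// Compress into buf. Close is mandatory: it writes the final frame bytes
	// and releases the C compression context.
	w := zstd.NewWriter(&buf)
	if _, err := w.Write([]byte("streaming hello, zstd")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress from buf; Close releases the C decompression context.
	r := zstd.NewReader(&buf)
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```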

### Benchmarks (benchmarked with v0.5.0)

The author of Zstd also wrote lz4. Zstd is intended to occupy a speed/ratio
level similar to what zlib currently provides. In our tests, it can always
be made to perform better than zlib by choosing an appropriate level while still
keeping compression and decompression time faster than zlib.

You can run the benchmarks against your own payloads by using the Go benchmark tool.
Just export your payload filepath as the `PAYLOAD` environment variable and run the benchmarks:

```
go test -bench .
```

Compression of a 7MB pdf, zstd (this wrapper) vs [czlib](https://github.com/DataDog/czlib):
```
BenchmarkCompression       5     221056624 ns/op      67.34 MB/s
BenchmarkDecompression   100      18370416 ns/op     810.32 MB/s

BenchmarkFzlibCompress     2     610156603 ns/op      24.40 MB/s
BenchmarkFzlibDecompress  20      81195246 ns/op     183.33 MB/s
```

Ratio is also better by a margin of ~20%.
Compression speed is always better than zlib on all the payloads we tested;
however, [czlib](https://github.com/DataDog/czlib) has optimisations that make it
faster at decompressing small payloads:

```
Testing with size: 11... czlib: 8.97 MB/s, zstd: 3.26 MB/s
Testing with size: 27... czlib: 23.3 MB/s, zstd: 8.22 MB/s
Testing with size: 62... czlib: 31.6 MB/s, zstd: 19.49 MB/s
Testing with size: 141... czlib: 74.54 MB/s, zstd: 42.55 MB/s
Testing with size: 323... czlib: 155.14 MB/s, zstd: 99.39 MB/s
Testing with size: 739... czlib: 235.9 MB/s, zstd: 216.45 MB/s
Testing with size: 1689... czlib: 116.45 MB/s, zstd: 345.64 MB/s
Testing with size: 3858... czlib: 176.39 MB/s, zstd: 617.56 MB/s
Testing with size: 8811... czlib: 254.11 MB/s, zstd: 824.34 MB/s
Testing with size: 20121... czlib: 197.43 MB/s, zstd: 1339.11 MB/s
Testing with size: 45951... czlib: 201.62 MB/s, zstd: 1951.57 MB/s
```

zstd starts to shine with payloads > 1KB.

### Stability - Current state: STABLE

The C library seems to be pretty stable and, according to the author, has been tested and fuzzed.

For the Go wrapper, the tests cover the most usual cases and we have successfully tested it on all staging and prod data.
30  third/github.com/DataDog/zstd/ZSTD_LICENSE  Normal file
@@ -0,0 +1,30 @@
BSD License

For Zstandard software

Copyright (c) 2016-present, Facebook, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

 * Neither the name Facebook nor the names of its contributors may be used to
   endorse or promote products derived from this software without specific
   prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
471  third/github.com/DataDog/zstd/bitstream.h  Normal file
@@ -0,0 +1,471 @@
/* ******************************************************************
   bitstream
   Part of FSE library
   header file (to include)
   Copyright (C) 2013-2017, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*
*  This API consists of small unitary functions, which must be inlined for best performance.
*  Since link-time-optimization is not available for all compilers,
*  these functions are defined into a .h to be included.
*/

/*-****************************************
*  Dependencies
******************************************/
#include "mem.h"            /* unaligned access routines */
#include "error_private.h"  /* error codes and messages */


/*-*************************************
*  Debug
***************************************/
#if defined(BIT_DEBUG) && (BIT_DEBUG>=1)
#  include <assert.h>
#else
#  ifndef assert
#    define assert(condition) ((void)0)
#  endif
#endif


/*=========================================
*  Target specific
=========================================*/
#if defined(__BMI__) && defined(__GNUC__)
#  include <immintrin.h>   /* support for bextr (experimental) */
#endif

#define STREAM_ACCUMULATOR_MIN_32  25
#define STREAM_ACCUMULATOR_MIN_64  57
#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))


/*-******************************************
*  bitStream encoding API (write forward)
********************************************/
/* bitStream can mix input from multiple sources.
 * A critical property of these streams is that they encode and decode in **reverse** direction.
 * So the first bit sequence you add will be the last to be read, like a LIFO stack.
 */
typedef struct
{
    size_t bitContainer;
    unsigned bitPos;
    char*  startPtr;
    char*  ptr;
    char*  endPtr;
} BIT_CStream_t;

MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
MEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
MEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);

/* Start with initCStream, providing the size of buffer to write into.
*  bitStream will never write outside of this buffer.
*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
*
*  bits are first added to a local register.
*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
*  Writing data into memory is an explicit operation, performed by the flushBits function.
*  Hence keep track how many bits are potentially stored into local register to avoid register overflow.
*  After a flushBits, a maximum of 7 bits might still be stored into local register.
*
*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
*
*  Last operation is to close the bitStream.
*  The function returns the final size of CStream in bytes.
*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
*/


/*-********************************************
*  bitStream decoding API (read backward)
**********************************************/
typedef struct
{
    size_t   bitContainer;
    unsigned bitsConsumed;
    const char* ptr;
    const char* start;
    const char* limitPtr;
} BIT_DStream_t;

typedef enum { BIT_DStream_unfinished = 0,
               BIT_DStream_endOfBuffer = 1,
               BIT_DStream_completed = 2,
               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */

MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);


/* Start by invoking BIT_initDStream().
*  A chunk of the bitStream is then stored into a local register.
*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
*  You can then retrieve bitFields stored into the local register, **in reverse order**.
*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
*  A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
*  Otherwise, it can be less than that, so proceed accordingly.
*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
*/


/*-****************************************
*  unsafe API
******************************************/
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */

MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
/* unsafe version; does not check buffer overflow */

MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */


/*-**************************************************************
*  Internal functions
****************************************************************/
MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
    assert(val != 0);
    {
#   if defined(_MSC_VER)   /* Visual */
        unsigned long r=0;
        _BitScanReverse ( &r, val );
        return (unsigned) r;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
        return 31 - __builtin_clz (val);
#   else   /* Software version */
        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
                                                 11, 14, 16, 18, 22, 25,  3, 30,
                                                  8, 12, 20, 28, 15, 17, 24,  7,
                                                 19, 27, 23,  6, 26,  5,  4, 31 };
        U32 v = val;
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
#   endif
    }
}

/*=====    Local Constants   =====*/
static const unsigned BIT_mask[] = {
    0,          1,         3,         7,         0xF,       0x1F,
    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,
    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,
    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,
    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
    0x3FFFFFFF, 0x7FFFFFFF};   /* up to 31 bits */
#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))

/*-**************************************************************
*  bitStream encoding
****************************************************************/
/*! BIT_initCStream() :
 *  `dstCapacity` must be > sizeof(size_t)
 *  @return : 0 if success,
 *            otherwise an error code (can be tested using ERR_isError()) */
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
                                  void* startPtr, size_t dstCapacity)
{
    bitC->bitContainer = 0;
    bitC->bitPos = 0;
    bitC->startPtr = (char*)startPtr;
    bitC->ptr = bitC->startPtr;
    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
    return 0;
}

/*! BIT_addBits() :
 *  can add up to 31 bits into `bitC`.
 *  Note : does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
                            size_t value, unsigned nbBits)
{
    MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);
    assert(nbBits < BIT_MASK_SIZE);
    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
    bitC->bitPos += nbBits;
}

/*! BIT_addBitsFast() :
 *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
                                size_t value, unsigned nbBits)
{
    assert((value>>nbBits) == 0);
    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    bitC->bitContainer |= value << bitC->bitPos;
    bitC->bitPos += nbBits;
}

/*! BIT_flushBitsFast() :
 *  assumption : bitContainer has not overflowed
 *  unsafe version; does not check buffer overflow */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
{
    size_t const nbBytes = bitC->bitPos >> 3;
    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
    bitC->ptr += nbBytes;
    assert(bitC->ptr <= bitC->endPtr);
    bitC->bitPos &= 7;
    bitC->bitContainer >>= nbBytes*8;
}

/*! BIT_flushBits() :
 *  assumption : bitContainer has not overflowed
 *  safe version; check for buffer overflow, and prevents it.
 *  note : does not signal buffer overflow.
 *  overflow will be revealed later on using BIT_closeCStream() */
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
    size_t const nbBytes = bitC->bitPos >> 3;
    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
    bitC->ptr += nbBytes;
    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
    bitC->bitPos &= 7;
    bitC->bitContainer >>= nbBytes*8;
}

/*! BIT_closeCStream() :
 *  @return : size of CStream, in bytes,
 *            or 0 if it could not fit into dstBuffer */
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
{
    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
    BIT_flushBits(bitC);
    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
}


/*-********************************************************
*  bitStream decoding
**********************************************************/
/*! BIT_initDStream() :
 *  Initialize a BIT_DStream_t.
 * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
 * `srcSize` must be the *exact* size of the bitStream, in bytes.
 * @return : size of stream (== srcSize), or an errorCode if a problem is detected
 */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }

    bitD->start = (const char*)srcBuffer;
    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);

    if (srcSize >= sizeof(bitD->bitContainer)) {  /* normal case */
        bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
    } else {
        bitD->ptr = bitD->start;
        bitD->bitContainer = *(const BYTE*)(bitD->start);
        switch(srcSize)
        {
        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
                /* fall-through */

        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
                /* fall-through */

        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
                /* fall-through */

        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
                /* fall-through */

        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
                /* fall-through */

        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
                /* fall-through */

        default: break;
        }
        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
        }
        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
    }

    return srcSize;
}

MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
{
    return bitContainer >> start;
}

MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
{
#if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008  /* experimental */
#  if defined(__x86_64__)
    if (sizeof(bitContainer)==8)
        return _bextr_u64(bitContainer, start, nbBits);
    else
#  endif
        return _bextr_u32(bitContainer, start, nbBits);
#else
    assert(nbBits < BIT_MASK_SIZE);
    return (bitContainer >> start) & BIT_mask[nbBits];
#endif
}

MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
{
    assert(nbBits < BIT_MASK_SIZE);
    return bitContainer & BIT_mask[nbBits];
}

/*! BIT_lookBits() :
 *  Provides next n bits from local register.
 *  local register is not modified.
 *  On 32-bits, maxNbBits==24.
 *  On 64-bits, maxNbBits==56.
 * @return : value extracted */
MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
#if defined(__BMI__) && defined(__GNUC__)   /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */
    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
#else
    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
#endif
}

/*! BIT_lookBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
{
    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    assert(nbBits >= 1);
    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
}

MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
    bitD->bitsConsumed += nbBits;
}

/*! BIT_readBits() :
 *  Read (consume) next n bits from local register and update.
 *  Pay attention to not read more than nbBits contained into local register.
 * @return : extracted value. */
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
{
    size_t const value = BIT_lookBits(bitD, nbBits);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_readBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
    size_t const value = BIT_lookBitsFast(bitD, nbBits);
    assert(nbBits >= 1);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_reloadDStream() :
 *  Refill `bitD` from buffer previously set in BIT_initDStream() .
 *  This function is safe, it guarantees it will not read beyond src buffer.
 * @return : status of `BIT_DStream_t` internal register.
 *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
        return BIT_DStream_overflow;

    if (bitD->ptr >= bitD->limitPtr) {
        bitD->ptr -= bitD->bitsConsumed >> 3;
        bitD->bitsConsumed &= 7;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        return BIT_DStream_unfinished;
    }
    if (bitD->ptr == bitD->start) {
        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
        return BIT_DStream_completed;
    }
    /* start < ptr < limitPtr */
    {   U32 nbBytes = bitD->bitsConsumed >> 3;
        BIT_DStream_status result = BIT_DStream_unfinished;
        if (bitD->ptr - nbBytes < bitD->start) {
            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
            result = BIT_DStream_endOfBuffer;
        }
        bitD->ptr -= nbBytes;
        bitD->bitsConsumed -= nbBytes*8;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
        return result;
    }
}

/*! BIT_endOfDStream() :
 * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
 */
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}

#if defined (__cplusplus)
}
#endif

#endif /* BITSTREAM_H_MODULE */
111  third/github.com/DataDog/zstd/compiler.h  Normal file
@@ -0,0 +1,111 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H

/*-*******************************************************
*  Compiler specifics
*********************************************************/
/* force inlining */
#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

/**
 * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
 * parameters. They must be inlined for the compiler to eliminate the constant
 * branches.
 */
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
/**
 * HINT_INLINE is used to help the compiler generate better code. It is *not*
 * used for "templates", so it can be tweaked based on the compilers
 * performance.
 *
 * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
 * always_inline attribute.
 *
 * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
 * attribute.
 */
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
#  define HINT_INLINE static INLINE_KEYWORD
#else
#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
#endif

/* force no inlining */
#ifdef _MSC_VER
#  define FORCE_NOINLINE static __declspec(noinline)
#else
#  ifdef __GNUC__
#    define FORCE_NOINLINE static __attribute__((__noinline__))
#  else
#    define FORCE_NOINLINE static
#  endif
#endif

/* target attribute */
#ifndef __has_attribute
  #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
#endif
#if defined(__GNUC__)
#  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
#else
#  define TARGET_ATTRIBUTE(target)
#endif

/* Enable runtime BMI2 dispatch based on the CPU.
 * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
 */
#ifndef DYNAMIC_BMI2
#if (defined(__clang__) && __has_attribute(__target__)) \
    || (defined(__GNUC__) \
        && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) \
    && (defined(__x86_64__) || defined(_M_X86)) \
    && !defined(__BMI2__)
#  define DYNAMIC_BMI2 1
#else
#  define DYNAMIC_BMI2 0
#endif
#endif

/* prefetch */
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
#  include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#  define PREFETCH(ptr)   _mm_prefetch((const char*)ptr, _MM_HINT_T0)
#elif defined(__GNUC__)
#  define PREFETCH(ptr)   __builtin_prefetch(ptr, 0, 0)
#else
#  define PREFETCH(ptr)   /* disabled */
#endif

/* disable warnings */
#ifdef _MSC_VER    /* Visual Studio */
#  include <intrin.h>                    /* For Visual 2005 */
#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
#endif

#endif /* ZSTD_COMPILER_H */
1048  third/github.com/DataDog/zstd/cover.c  Normal file
File diff suppressed because it is too large
216  third/github.com/DataDog/zstd/cpu.h  Executable file
@@ -0,0 +1,216 @@
/*
 * Copyright (c) 2018-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMMON_CPU_H
#define ZSTD_COMMON_CPU_H

/**
 * Implementation taken from folly/CpuId.h
 * https://github.com/facebook/folly/blob/master/folly/CpuId.h
 */

#include <string.h>

#include "mem.h"

#ifdef _MSC_VER
#include <intrin.h>
#endif

typedef struct {
    U32 f1c;
    U32 f1d;
    U32 f7b;
    U32 f7c;
} ZSTD_cpuid_t;

MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
    U32 f1c = 0;
    U32 f1d = 0;
    U32 f7b = 0;
    U32 f7c = 0;
#ifdef _MSC_VER
    int reg[4];
    __cpuid((int*)reg, 0);
    {
        int const n = reg[0];
        if (n >= 1) {
            __cpuid((int*)reg, 1);
            f1c = (U32)reg[2];
            f1d = (U32)reg[3];
        }
        if (n >= 7) {
            __cpuidex((int*)reg, 7, 0);
            f7b = (U32)reg[1];
            f7c = (U32)reg[2];
        }
    }
#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
    /* The following block is like the normal cpuid branch below, but gcc
     * reserves ebx for use of its pic register so we must specially
     * handle the save and restore to avoid clobbering the register
     */
    U32 n;
    __asm__(
        "pushl %%ebx\n\t"
        "cpuid\n\t"
        "popl %%ebx\n\t"
        : "=a"(n)
        : "a"(0)
        : "ecx", "edx");
    if (n >= 1) {
      U32 f1a;
      __asm__(
          "pushl %%ebx\n\t"
          "cpuid\n\t"
          "popl %%ebx\n\t"
          : "=a"(f1a), "=c"(f1c), "=d"(f1d)
          : "a"(1)
          :);
    }
    if (n >= 7) {
      __asm__(
          "pushl %%ebx\n\t"
          "cpuid\n\t"
          "movl %%ebx, %%eax\n\r"
          "popl %%ebx"
          : "=a"(f7b), "=c"(f7c)
          : "a"(7), "c"(0)
          : "edx");
    }
#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
    U32 n;
    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
    if (n >= 1) {
      U32 f1a;
      __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
    }
    if (n >= 7) {
      U32 f7a;
      __asm__("cpuid"
              : "=a"(f7a), "=b"(f7b), "=c"(f7c)
              : "a"(7), "c"(0)
              : "edx");
    }
#endif
    {
        ZSTD_cpuid_t cpuid;
        cpuid.f1c = f1c;
        cpuid.f1d = f1d;
        cpuid.f7b = f7b;
        cpuid.f7c = f7c;
        return cpuid;
    }
}

#define X(name, r, bit)                                                        \
  MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {                 \
    return ((cpuid.r) & (1U << bit)) != 0;                                     \
  }

/* cpuid(1): Processor Info and Feature Bits. */
#define C(name, bit) X(name, f1c, bit)
  C(sse3, 0)
  C(pclmuldq, 1)
  C(dtes64, 2)
  C(monitor, 3)
  C(dscpl, 4)
  C(vmx, 5)
  C(smx, 6)
  C(eist, 7)
  C(tm2, 8)
  C(ssse3, 9)
  C(cnxtid, 10)
  C(fma, 12)
  C(cx16, 13)
  C(xtpr, 14)
  C(pdcm, 15)
  C(pcid, 17)
  C(dca, 18)
  C(sse41, 19)
  C(sse42, 20)
  C(x2apic, 21)
  C(movbe, 22)
  C(popcnt, 23)
  C(tscdeadline, 24)
  C(aes, 25)
  C(xsave, 26)
  C(osxsave, 27)
  C(avx, 28)
  C(f16c, 29)
  C(rdrand, 30)
#undef C
#define D(name, bit) X(name, f1d, bit)
  D(fpu, 0)
  D(vme, 1)
  D(de, 2)
  D(pse, 3)
  D(tsc, 4)
  D(msr, 5)
  D(pae, 6)
  D(mce, 7)
  D(cx8, 8)
  D(apic, 9)
  D(sep, 11)
  D(mtrr, 12)
  D(pge, 13)
  D(mca, 14)
  D(cmov, 15)
  D(pat, 16)
  D(pse36, 17)
  D(psn, 18)
  D(clfsh, 19)
  D(ds, 21)
  D(acpi, 22)
  D(mmx, 23)
  D(fxsr, 24)
  D(sse, 25)
  D(sse2, 26)
  D(ss, 27)
  D(htt, 28)
  D(tm, 29)
  D(pbe, 31)
#undef D

/* cpuid(7): Extended Features. */
#define B(name, bit) X(name, f7b, bit)
  B(bmi1, 3)
  B(hle, 4)
  B(avx2, 5)
  B(smep, 7)
  B(bmi2, 8)
  B(erms, 9)
  B(invpcid, 10)
  B(rtm, 11)
  B(mpx, 14)
  B(avx512f, 16)
  B(avx512dq, 17)
  B(rdseed, 18)
  B(adx, 19)
  B(smap, 20)
  B(avx512ifma, 21)
  B(pcommit, 22)
  B(clflushopt, 23)
  B(clwb, 24)
  B(avx512pf, 26)
  B(avx512er, 27)
  B(avx512cd, 28)
  B(sha, 29)
  B(avx512bw, 30)
  B(avx512vl, 31)
#undef B
#define C(name, bit) X(name, f7c, bit)
  C(prefetchwt1, 0)
  C(avx512vbmi, 1)
#undef C

#undef X

#endif /* ZSTD_COMMON_CPU_H */
1913  third/github.com/DataDog/zstd/divsufsort.c  Normal file
File diff suppressed because it is too large
67  third/github.com/DataDog/zstd/divsufsort.h  Normal file
@@ -0,0 +1,67 @@
/*
 * divsufsort.h for libdivsufsort-lite
 * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DIVSUFSORT_H
#define _DIVSUFSORT_H 1

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */


/*- Prototypes -*/

/**
 * Constructs the suffix array of a given string.
 * @param T [0..n-1] The input string.
 * @param SA [0..n-1] The output array of suffixes.
 * @param n The length of the given string.
 * @param openMP enables OpenMP optimization.
 * @return 0 if no error occurred, -1 or -2 otherwise.
 */
int
divsufsort(const unsigned char *T, int *SA, int n, int openMP);

/**
 * Constructs the burrows-wheeler transformed string of a given string.
 * @param T [0..n-1] The input string.
 * @param U [0..n-1] The output string. (can be T)
 * @param A [0..n-1] The temporary array. (can be NULL)
 * @param n The length of the given string.
 * @param num_indexes The length of secondary indexes array. (can be NULL)
 * @param indexes The secondary indexes array. (can be NULL)
 * @param openMP enables OpenMP optimization.
 * @return The primary index if no error occurred, -1 or -2 otherwise.
 */
int
divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP);


#ifdef __cplusplus
} /* extern "C" */
#endif /* __cplusplus */

#endif /* _DIVSUFSORT_H */
221  third/github.com/DataDog/zstd/entropy_common.c  Normal file
@@ -0,0 +1,221 @@
/*
   Common functions of New Generation Entropy library
   Copyright (C) 2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
*************************************************************************** */

/* *************************************
*  Dependencies
***************************************/
#include "mem.h"
#include "error_private.h"       /* ERR_*, ERROR */
#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
#include "huf.h"


/*===   Version   ===*/
unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }


/*===   Error Management   ===*/
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }

unsigned HUF_isError(size_t code) { return ERR_isError(code); }
const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }


/*-**************************************************************
*  FSE NCount encoding-decoding
****************************************************************/
size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                 const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    int previous0 = 0;

    if (hbSize < 4) return ERROR(srcSize_wrong);
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    while ((remaining>1) & (charnum<=*maxSVPtr)) {
        if (previous0) {
            unsigned n0 = charnum;
            while ((bitStream & 0xFFFF) == 0xFFFF) {
                n0 += 24;
                if (ip < iend-5) {
                    ip += 2;
                    bitStream = MEM_readLE32(ip) >> bitCount;
                } else {
                    bitStream >>= 16;
                    bitCount   += 16;
            }   }
            while ((bitStream & 3) == 3) {
                n0 += 3;
                bitStream >>= 2;
                bitCount += 2;
            }
            n0 += bitStream & 3;
            bitCount += 2;
            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
            while (charnum < n0) normalizedCounter[charnum++] = 0;
            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
                bitStream = MEM_readLE32(ip) >> bitCount;
            } else {
                bitStream >>= 2;
        }   }
        {   int const max = (2*threshold-1) - remaining;
            int count;

            if ((bitStream & (threshold-1)) < (U32)max) {
                count = bitStream & (threshold-1);
                bitCount += nbBits-1;
            } else {
                count = bitStream & (2*threshold-1);
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy */
            remaining -= count < 0 ? -count : count;   /* -1 means +1 */
            normalizedCounter[charnum++] = (short)count;
            previous0 = !count;
            while (remaining < threshold) {
                nbBits--;
                threshold >>= 1;
            }

            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
    }   }   /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
    if (remaining != 1) return ERROR(corruption_detected);
    if (bitCount > 32) return ERROR(corruption_detected);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;
    return ip-istart;
}


/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer.
    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
    @return : size read from `src` , or an error Code .
    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
*/
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;
    size_t oSize;

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */

    if (iSize >= 128) {  /* special header */
        oSize = iSize - 127;
        iSize = ((oSize+1)/2);
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        if (oSize >= hwSize) return ERROR(corruption_detected);
        ip += 1;
        {   U32 n;
            for (n=0; n<oSize; n+=2) {
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
    }   }   }
    else {   /* header compressed with FSE (normal case) */
        FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)];  /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6);   /* max (hwSize-1) values decoded, as last one is implied */
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << BIT_highbit32(rest);
            U32 const lastWeight = BIT_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);  /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}
48  third/github.com/DataDog/zstd/error_private.c  Normal file
@@ -0,0 +1,48 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* The purpose of this file is to have a single list of error strings embedded in binary */

#include "error_private.h"

const char* ERR_getErrorString(ERR_enum code)
{
    static const char* const notErrorCode = "Unspecified error code";
    switch( code )
    {
    case PREFIX(no_error): return "No error detected";
    case PREFIX(GENERIC):  return "Error (generic)";
    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
    case PREFIX(version_unsupported): return "Version not supported";
    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
    case PREFIX(corruption_detected): return "Corrupted block detected";
    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
    case PREFIX(parameter_unsupported): return "Unsupported parameter";
    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
    case PREFIX(init_missing): return "Context should be init first";
    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
    case PREFIX(srcSize_wrong): return "Src size is incorrect";
    /* following error codes are not stable and may be removed or changed in a future version */
    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
    case PREFIX(maxCode):
    default: return notErrorCode;
    }
}
76
third/github.com/DataDog/zstd/error_private.h
Normal file
76
third/github.com/DataDog/zstd/error_private.h
Normal file
@ -0,0 +1,76 @@
|
||||
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* Note : this module is expected to remain private, do not expose it */

#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif


/* ****************************************
*  Dependencies
******************************************/
#include <stddef.h>        /* size_t */
#include "zstd_errors.h"   /* enum list */


/* ****************************************
*  Compiler-specific
******************************************/
#if defined(__GNUC__)
#  define ERR_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define ERR_STATIC static inline
#elif defined(_MSC_VER)
#  define ERR_STATIC static __inline
#else
#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif


/*-****************************************
*  Customization (error_public.h)
******************************************/
typedef ZSTD_ErrorCode ERR_enum;
#define PREFIX(name) ZSTD_error_##name


/*-****************************************
*  Error codes handling
******************************************/
#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
#define ERROR(name) ZSTD_ERROR(name)
#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))

ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }

ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
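
/* A minimal usage sketch for the helpers above : an error is encoded as the
 * two's complement of its enum value ((size_t)-code), far too large to be a
 * valid length, so ERR_isError() detects it and ERR_getErrorCode() recovers
 * the enum :
 *
 *     size_t const r = ERROR(srcSize_wrong);   // == (size_t)-ZSTD_error_srcSize_wrong
 *     assert(ERR_isError(r));
 *     assert(ERR_getErrorCode(r) == PREFIX(srcSize_wrong));
 */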

/*-****************************************
*  Error Strings
******************************************/

const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */

ERR_STATIC const char* ERR_getErrorName(size_t code)
{
    return ERR_getErrorString(ERR_getErrorCode(code));
}


#if defined (__cplusplus)
}
#endif

#endif /* ERROR_H_MODULE */
35
third/github.com/DataDog/zstd/errors.go
Normal file
@ -0,0 +1,35 @@
package zstd

/*
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
*/
import "C"

// ErrorCode is an error returned by the zstd library.
type ErrorCode int

// Error returns the error string given by zstd.
func (e ErrorCode) Error() string {
	return C.GoString(C.ZSTD_getErrorName(C.size_t(e)))
}

func cIsError(code int) bool {
	return int(C.ZSTD_isError(C.size_t(code))) != 0
}

// getError returns an error for the return code, or nil if it's not an error.
func getError(code int) error {
	if code < 0 && cIsError(code) {
		return ErrorCode(code)
	}
	return nil
}
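
// A minimal caller sketch (hypothetical, for illustration only): zstd returns
// sizes as non-negative values and encodes errors as negative codes, so
// wrapping a return value with getError yields either nil or a typed
// ErrorCode whose message is resolved via ZSTD_getErrorName:
//
//	ret := int(someZstdCgoCall(...)) // any cgo call returning a size/code
//	if err := getError(ret); err != nil {
//		return err
//	}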

// IsDstSizeTooSmallError returns whether the error corresponds to the standard
// zstd dstSize_tooSmall error.
func IsDstSizeTooSmallError(e error) bool {
	if e != nil && e.Error() == "Destination buffer is too small" {
		return true
	}
	return false
}
29
third/github.com/DataDog/zstd/errors_test.go
Normal file
@ -0,0 +1,29 @@
package zstd

import (
	"testing"
)

const (
	// ErrorUpperBound is the upper bound on error numbers; it is currently only
	// used in tests. If it needs to be updated, check zstd_errors.h for the max.
	ErrorUpperBound = 1000
)

// TestFindIsDstSizeTooSmallError tests that there is exactly one error code
// that corresponds to dst size too small.
func TestFindIsDstSizeTooSmallError(t *testing.T) {
	found := 0
	for i := -1; i > -ErrorUpperBound; i-- {
		e := ErrorCode(i)
		if IsDstSizeTooSmallError(e) {
			found++
		}
	}

	if found == 0 {
		t.Fatal("Couldn't find an error code for DstSizeTooSmall error, please make sure we didn't change the error string")
	} else if found > 1 {
		t.Fatal("IsDstSizeTooSmallError found multiple error codes matching, this shouldn't be the case")
	}
}
704
third/github.com/DataDog/zstd/fse.h
Normal file
@ -0,0 +1,704 @@
/* ******************************************************************
   FSE : Finite State Entropy codec
   Public Prototypes declaration
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef FSE_H
#define FSE_H


/*-*****************************************
*  Dependencies
******************************************/
#include <stddef.h>    /* size_t, ptrdiff_t */


/*-*****************************************
*  FSE_PUBLIC_API : control library symbols visibility
******************************************/
#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
#  define FSE_PUBLIC_API __declspec(dllexport)
#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows generating better code, saving a function pointer load from the IAT and an indirect jump. */
#else
#  define FSE_PUBLIC_API
#endif

/*------   Version   ------*/
#define FSE_VERSION_MAJOR    0
#define FSE_VERSION_MINOR    9
#define FSE_VERSION_RELEASE  0

#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
#define FSE_QUOTE(str) #str
#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)

#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */

/*-****************************************
*  FSE simple functions
******************************************/
/*! FSE_compress() :
    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
    'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
    @return : size of compressed data (<= dstCapacity).
    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())
*/
FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize);

/*! FSE_decompress():
    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
    into already allocated destination buffer 'dst', of size 'dstCapacity'.
    @return : size of regenerated data (<= dstCapacity),
              or an error code, which can be tested using FSE_isError() .

    ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
    Why ? : making this distinction requires a header.
    Header management is intentionally delegated to the user layer, which can better manage special cases.
*/
FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
                                     const void* cSrc, size_t cSrcSize);
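
/*! A minimal round-trip sketch (illustrative only, not part of the API) :
    chains the two simple functions above. The caller must keep 'srcSize'
    out-of-band, since FSE intentionally delegates header management to the
    user layer, and must special-case the 0 (not compressible) and 1 (RLE)
    return values of FSE_compress().

        size_t roundTrip(void* dst, size_t dstCapacity,
                         void* tmp, size_t tmpCapacity,
                         const void* src, size_t srcSize)
        {
            size_t const cSize = FSE_compress(tmp, tmpCapacity, src, srcSize);
            if (FSE_isError(cSize)) return cSize;
            if (cSize <= 1) return cSize;   // raw / RLE : caller handles these
            return FSE_decompress(dst, dstCapacity, tmp, cSize);   // == srcSize on success
        }
*/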


/*-*****************************************
*  Tool functions
******************************************/
FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */

/* Error Management */
FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */


/*-*****************************************
*  FSE advanced functions
******************************************/
/*! FSE_compress2() :
    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
    Both parameters can be defined as '0' to mean : use default value
    @return : size of compressed data
    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
                     if FSE_isError(return), it's an error code.
*/
FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);


/*-*****************************************
*  FSE detailed API
******************************************/
/*!
FSE_compress() does the following:
1. count symbol occurrence from source[] into table count[]
2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
3. save normalized counters to memory buffer using writeNCount()
4. build encoding table 'CTable' from normalized counters
5. encode the data stream using encoding table 'CTable'

FSE_decompress() does the following:
1. read normalized counters with readNCount()
2. build decoding table 'DTable' from normalized counters
3. decode the data stream using decoding table 'DTable'

The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and provide normalized distribution using external method.
*/

/* *** COMPRESSION *** */

/*! FSE_count():
    Provides the precise count of each byte within a table 'count'.
    'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
    *maxSymbolValuePtr will be updated if detected smaller than initial value.
    @return : the count of the most frequent symbol (which is not identified).
              if return == srcSize, there is only one symbol.
              Can also return an error code, which can be tested with FSE_isError(). */
FSE_PUBLIC_API size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);

/*! FSE_optimalTableLog():
    dynamically downsize 'tableLog' when conditions are met.
    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
    @return : recommended tableLog (necessarily <= 'maxTableLog') */
FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);

/*! FSE_normalizeCount():
    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
    @return : tableLog,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue);

/*! FSE_NCountWriteBound():
    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
    Typically useful for allocation purpose. */
FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_writeNCount():
    Compactly save 'normalizedCounter' into 'buffer'.
    @return : size of the compressed table,
              or an errorCode, which can be tested using FSE_isError(). */
FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);


/*! Constructor and Destructor of FSE_CTable.
    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);

/*! FSE_buildCTable():
    Builds `ct`, which must be already allocated, using FSE_createCTable().
    @return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_compress_usingCTable():
    Compress `src` using `ct` into `dst` which must be already allocated.
    @return : size of compressed data (<= `dstCapacity`),
              or 0 if compressed data could not fit into `dst`,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);

/*!
Tutorial :
----------
The first step is to count all symbols. FSE_count() does this job very fast.
Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
FSE_count() will return the number of occurrences of the most frequent symbol.
This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).

The next step is to normalize the frequencies.
FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
You can use 'tableLog'==0 to mean "use default tableLog value".
If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").

The result of FSE_normalizeCount() will be saved into a table,
called 'normalizedCounter', which is a table of signed short.
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
The return value is tableLog if everything proceeded as expected.
It is 0 if there is a single symbol within distribution.
If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).

'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
'buffer' must be already allocated.
For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
The result of the function is the number of bytes written into 'buffer'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).

'normalizedCounter' can then be used to create the compression table 'CTable'.
The space required by 'CTable' must be already allocated, using FSE_createCTable().
You can then use FSE_buildCTable() to fill 'CTable'.
If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).

'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'.
The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
If it returns '0', compressed data could not fit into 'dst'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
*/
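
/*! Illustrative sketch of the pipeline described above (count -> normalize ->
    writeNCount -> buildCTable -> encode), wired together with the detailed
    API. Buffer sizing and the 0/1 special return values are elided for
    brevity; every step should be checked with FSE_isError() in real code.

        size_t compressDetailed(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize)
        {
            unsigned count[256];
            short    norm[256];
            unsigned maxSymbolValue = 255;
            unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
            FSE_CTable* ct;
            size_t hSize, cSize;

            FSE_count(count, &maxSymbolValue, src, srcSize);
            FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
            hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSymbolValue, tableLog);
            ct = FSE_createCTable(maxSymbolValue, tableLog);
            FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);
            cSize = FSE_compress_usingCTable((char*)dst + hSize, dstCapacity - hSize,
                                             src, srcSize, ct);
            FSE_freeCTable(ct);
            return hSize + cSize;
        }
*/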


/* *** DECOMPRESSION *** */

/*! FSE_readNCount():
    Read compactly saved 'normalizedCounter' from 'rBuffer'.
    @return : size read from 'rBuffer',
              or an errorCode, which can be tested using FSE_isError().
              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);

/*! Constructor and Destructor of FSE_DTable.
    Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);

/*! FSE_buildDTable():
    Builds 'dt', which must be already allocated, using FSE_createDTable().
    @return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_decompress_usingDTable():
    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
    into `dst` which must be already allocated.
    @return : size of regenerated data (necessarily <= `dstCapacity`),
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);

/*!
Tutorial :
----------
(Note : these functions only decompress FSE-compressed blocks.
 If block is uncompressed, use memcpy() instead
 If block is a single repeated byte, use memset() instead )

The first step is to obtain the normalized frequencies of symbols.
This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
or size the table to handle worst case situations (typically 256).
FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSE_isError().

The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
This is performed by the function FSE_buildDTable().
The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
If there is an error, the function will return an error code, which can be tested using FSE_isError().

`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
`cSrcSize` must be strictly correct, otherwise decompression will fail.
FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/
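
/*! Illustrative counterpart of the pipeline above (readNCount -> buildDTable ->
    decompress_usingDTable). Error checks via FSE_isError() are elided for
    brevity; real code must test every returned size.

        size_t decompressDetailed(void* dst, size_t dstCapacity,
                                  const void* cSrc, size_t cSrcSize)
        {
            short    norm[256];
            unsigned maxSymbolValue = 255;
            unsigned tableLog;
            size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog,
                                                cSrc, cSrcSize);
            FSE_DTable* const dt = FSE_createDTable(tableLog);
            size_t dSize;
            FSE_buildDTable(dt, norm, maxSymbolValue, tableLog);
            dSize = FSE_decompress_usingDTable(dst, dstCapacity,
                                               (const char*)cSrc + hSize,
                                               cSrcSize - hSize, dt);
            FSE_freeDTable(dt);
            return dSize;
        }
*/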

#endif  /* FSE_H */

#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY

/* *** Dependency *** */
#include "bitstream.h"


/* *****************************************
*  Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) (size + (size>>7))
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))

/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
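
/* Worked example for the macros above (values chosen for illustration) :
 * with maxTableLog==12 and maxSymbolValue==255,
 * FSE_CTABLE_SIZE_U32(12, 255) == 1 + (1<<11) + 256*2 == 2561 U32 cells, and
 * FSE_DTABLE_SIZE_U32(12) == 1 + (1<<12) == 4097 U32 cells :
 *
 *     FSE_CTable ct[FSE_CTABLE_SIZE_U32(12, 255)];
 *     FSE_DTable dt[FSE_DTABLE_SIZE_U32(12)];
 */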

/* *****************************************
*  FSE advanced API
*******************************************/
/* FSE_count_wksp() :
 * Same as FSE_count(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least `1024` unsigned
 */
size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                      const void* source, size_t sourceSize, unsigned* workSpace);

/** FSE_countFast() :
 *  same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr
 */
size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);

/* FSE_countFast_wksp() :
 * Same as FSE_countFast(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least `1024` unsigned
 */
size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace);

/*! FSE_count_simple() :
 * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
 * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
 */
size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);


unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/**< same as FSE_optimalTableLog(), which uses `minus==2` */

/* FSE_compress_wksp() :
 * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
 * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
 */
#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */

size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/**< build a fake FSE_CTable, designed to always compress the same symbolValue */

/* FSE_buildCTable_wksp() :
 * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * `wkspSize` must be >= `(1<<tableLog)`.
 */
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */

size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/**< build a fake FSE_DTable, designed to always generate the same symbolValue */

size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */

typedef enum {
    FSE_repeat_none,  /**< Cannot use the previous table */
    FSE_repeat_check, /**< Can use the previous table but it must be checked */
    FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
} FSE_repeat;

/* *****************************************
*  FSE symbol compression API
*******************************************/
/*!
   This API consists of small unitary functions, which highly benefit from being inlined.
   Hence their bodies are included in the next section.
*/
typedef struct {
    ptrdiff_t   value;
    const void* stateTable;
    const void* symbolTT;
    unsigned    stateLog;
} FSE_CState_t;

static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);

static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);

static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);

/**<
These functions are inner components of FSE_compress_usingCTable().
They allow the creation of custom streams, mixing multiple tables and bit sources.

A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
So the first symbol you will encode is the last you will decode, like a LIFO stack.

You will need a few variables to track your CStream. They are :

FSE_CTable    ct;         // Provided by FSE_buildCTable()
BIT_CStream_t bitStream;  // bitStream tracking structure
FSE_CState_t  state;      // State tracking structure (can have several)

The first thing to do is to init bitStream and state.
    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
    FSE_initCState(&state, ct);

Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
You can then encode your input data, byte after byte.
FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
Remember decoding will be done in reverse direction.
    FSE_encodeSymbol(&bitStream, &state, symbol);

At any time, you can also add any bit sequence.
Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
    BIT_addBits(&bitStream, bitField, nbBits);

The above methods don't commit data to memory, they just store it into local register, for speed.
Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
Writing data to memory is a manual operation, performed by the flushBits function.
    BIT_flushBits(&bitStream);

Your last FSE encoding operation shall be to flush your last state value(s).
    FSE_flushCState(&bitStream, &state);

Finally, you must close the bitStream.
The function returns the size of CStream in bytes.
If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
    size_t size = BIT_closeCStream(&bitStream);
*/


/* *****************************************
*  FSE symbol decompression API
*******************************************/
typedef struct {
    size_t      state;
    const void* table;   /* precise table may vary, depending on U16 */
} FSE_DState_t;


static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);

static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);

static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);

/**<
Let's now decompose FSE_decompress_usingDTable() into its unitary components.
You will decode FSE-encoded symbols from the bitStream,
and also any other bitFields you put in, **in reverse order**.

You will need a few variables to track your bitStream. They are :

BIT_DStream_t DStream;    // Stream context
FSE_DState_t  DState;     // State context. Multiple ones are possible
FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()

The first thing to do is to init the bitStream.
    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);

You should then retrieve your initial state(s)
(in reverse flushing order if you have several ones) :
    FSE_initDState(&DState, &DStream, DTablePtr);

You can then decode your data, symbol after symbol.
For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);

You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
Note : maximum allowed nbBits is 25, for 32-bits compatibility
    size_t bitField = BIT_readBits(&DStream, nbBits);

All above operations only read from local register (whose size depends on size_t).
Refueling the register from memory is manually performed by the reload method.
    endSignal = BIT_reloadDStream(&DStream);

BIT_reloadDStream() result tells if there is still some more data to read from DStream.
    BIT_DStream_unfinished : there is still some data left into the DStream.
    BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
    BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
    BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.

When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
to properly detect the exact end of stream.
After each decoded symbol, check if DStream is fully consumed using this simple test :
    BIT_reloadDStream(&DStream) >= BIT_DStream_completed

When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
Checking if DStream has reached its end is performed by :
    BIT_endOfDStream(&DStream);
Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
    FSE_endOfDState(&DState);
*/


/* *****************************************
*  FSE unsafe API
*******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */


/* *****************************************
*  Implementation of inlined functions
*******************************************/
typedef struct {
    int deltaFindState;
    U32 deltaNbBits;
} FSE_symbolCompressionTransform; /* total 8 bytes */

MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
{
    const void* ptr = ct;
    const U16* u16ptr = (const U16*) ptr;
    const U32 tableLog = MEM_read16(ptr);
    statePtr->value = (ptrdiff_t)1<<tableLog;
    statePtr->stateTable = u16ptr+2;
    statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
    statePtr->stateLog = tableLog;
}


/*! FSE_initCState2() :
*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
*   uses the smallest state value possible, saving the cost of this symbol */
MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
{
    FSE_initCState(statePtr, ct);
    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
        const U16* stateTable = (const U16*)(statePtr->stateTable);
        U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
    }
}

MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
{
    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
    const U16* const stateTable = (const U16*)(statePtr->stateTable);
    U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
    BIT_addBits(bitC, statePtr->value, nbBitsOut);
    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}

MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
{
    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
    BIT_flushBits(bitC);
}


/* ======    Decompression    ====== */

typedef struct {
    U16 tableLog;
    U16 fastMode;
} FSE_DTableHeader;   /* sizeof U32 */

typedef struct
{
    unsigned short newState;
    unsigned char  symbol;
    unsigned char  nbBits;
} FSE_decode_t;   /* size == U32 */

MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
    const void* ptr = dt;
    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
    BIT_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    return DInfo.symbol;
}

MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
}

MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBits(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

/*! FSE_decodeSymbolFast() :
    unsafe, only works if no symbol has a probability > 50% */
MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
    return DStatePtr->state == 0;
}



#ifndef FSE_COMMONDEFS_ONLY

/* **************************************************************
*  Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
*  Increasing memory usage improves compression ratio
*  Reduced memory usage can improve speed, due to cache effect
*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#ifndef FSE_MAX_MEMORY_USAGE
#  define FSE_MAX_MEMORY_USAGE 14
#endif
#ifndef FSE_DEFAULT_MEMORY_USAGE
#  define FSE_DEFAULT_MEMORY_USAGE 13
#endif

/*!FSE_MAX_SYMBOL_VALUE :
*  Maximum symbol value authorized.
*  Required for proper stack allocation */
#ifndef FSE_MAX_SYMBOL_VALUE
#  define FSE_MAX_SYMBOL_VALUE 255
#endif

/* **************************************************************
*  template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
#define FSE_DECODE_TYPE FSE_decode_t


#endif   /* !FSE_COMMONDEFS_ONLY */


/* ***************************************************************
*  Constants
*****************************************************************/
#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5

#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif

#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)


#endif /* FSE_STATIC_LINKING_ONLY */


#if defined (__cplusplus)
}
#endif
849
third/github.com/DataDog/zstd/fse_compress.c
Normal file
@ -0,0 +1,849 @@
/* ******************************************************************
   FSE : Finite State Entropy encoder
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

/* **************************************************************
*  Includes
****************************************************************/
#include <stdlib.h>     /* malloc, free, qsort */
#include <string.h>     /* memcpy, memset */
#include <stdio.h>      /* printf (debug) */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"


/* **************************************************************
*  Error Management
****************************************************************/
#define FSE_isError ERR_isError
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/* **************************************************************
*  Templates
****************************************************************/
/*
  designed to be included
  for type-specific functions (template emulation in C)
  The objective is to write these functions only once, for improved maintenance
*/

/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
#  error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
#  error "FSE_FUNCTION_TYPE must be defined"
#endif

/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)


/* Function templates */

/* FSE_buildCTable_wksp() :
 * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * wkspSize should be sized to handle the worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
 * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
 */
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    U32 const tableSize = 1 << tableLog;
    U32 const tableMask = tableSize - 1;
    void* const ptr = ct;
    U16* const tableU16 = ( (U16*) ptr) + 2;
    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
    U32 const step = FSE_TABLESTEP(tableSize);
    U32 cumul[FSE_MAX_SYMBOL_VALUE+2];

    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;
    U32 highThreshold = tableSize-1;

    /* CTable header */
    if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
    tableU16[-2] = (U16) tableLog;
    tableU16[-1] = (U16) maxSymbolValue;

    /* For explanations on how to distribute symbol values over the table :
     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */

    /* symbol start positions */
    {   U32 u;
        cumul[0] = 0;
        for (u=1; u<=maxSymbolValue+1; u++) {
            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
                cumul[u] = cumul[u-1] + 1;
                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
            } else {
                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
        }   }
        cumul[maxSymbolValue+1] = tableSize+1;
    }

    /* Spread symbols */
    {   U32 position = 0;
        U32 symbol;
        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
            int nbOccurences;
            for (nbOccurences=0; nbOccurences<normalizedCounter[symbol]; nbOccurences++) {
                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* Low proba area */
        }   }

        if (position!=0) return ERROR(GENERIC);   /* Must have gone through all positions */
    }

    /* Build table */
    {   U32 u; for (u=0; u<tableSize; u++) {
        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
    }   }

    /* Build Symbol Transformation Table */
    {   unsigned total = 0;
        unsigned s;
        for (s=0; s<=maxSymbolValue; s++) {
            switch (normalizedCounter[s])
            {
            case  0: break;

            case -1:
            case  1:
                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
                symbolTT[s].deltaFindState = total - 1;
                total ++;
                break;
            default :
                {
                    U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
                    total += normalizedCounter[s];
    }   }   }   }

    return 0;
}


size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE];   /* memset() is not necessary, even if the static analyzer complains about it */
    return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
}



#ifndef FSE_COMMONDEFS_ONLY

/*-**************************************************************
*  FSE NCount encoding-decoding
****************************************************************/
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
{
    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
}
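
/* Worked example for the bound above : with maxSymbolValue==255 and
 * tableLog==12, maxHeaderSize == ((256 * 12) >> 3) + 3 == 384 + 3 == 387
 * bytes, comfortably below the FSE_NCOUNTBOUND (512) default used when
 * maxSymbolValue==0. */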

static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
                                       const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
                                       unsigned writeIsSafe)
{
    BYTE* const ostart = (BYTE*) header;
    BYTE* out = ostart;
    BYTE* const oend = ostart + headerBufferSize;
    int nbBits;
    const int tableSize = 1 << tableLog;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    int previous0 = 0;

    bitStream = 0;
    bitCount  = 0;
    /* Table Size */
    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
    bitCount  += 4;

    /* Init */
    remaining = tableSize+1;   /* +1 for extra accuracy */
    threshold = tableSize;
    nbBits = tableLog+1;

    while (remaining>1) {  /* stops at 1 */
        if (previous0) {
            unsigned start = charnum;
            while (!normalizedCounter[charnum]) charnum++;
            while (charnum >= start+24) {
                start+=24;
                bitStream += 0xFFFFU << bitCount;
                if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                out[0] = (BYTE) bitStream;
                out[1] = (BYTE)(bitStream>>8);
                out+=2;
                bitStream>>=16;
            }
            while (charnum >= start+3) {
                start+=3;
                bitStream += 3 << bitCount;
                bitCount += 2;
            }
            bitStream += (charnum-start) << bitCount;
            bitCount += 2;
            if (bitCount>16) {
                if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                out[0] = (BYTE)bitStream;
                out[1] = (BYTE)(bitStream>>8);
                out += 2;
                bitStream >>= 16;
                bitCount -= 16;
        }   }
        {   int count = normalizedCounter[charnum++];
            int const max = (2*threshold-1)-remaining;
            remaining -= count < 0 ? -count : count;
            count++;   /* +1 for extra accuracy */
            if (count>=threshold) count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
            bitStream += count << bitCount;
            bitCount  += nbBits;
            bitCount  -= (count<max);
            previous0  = (count==1);
            if (remaining<1) return ERROR(GENERIC);
            while (remaining<threshold) { nbBits--; threshold>>=1; }
        }
        if (bitCount>16) {
            if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
            out[0] = (BYTE)bitStream;
            out[1] = (BYTE)(bitStream>>8);
            out += 2;
            bitStream >>= 16;
            bitCount -= 16;
    }   }

    /* flush remaining bitStream */
    if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
    out[0] = (BYTE)bitStream;
    out[1] = (BYTE)(bitStream>>8);
    out+= (bitCount+7) /8;

    if (charnum > maxSymbolValue + 1) return ERROR(GENERIC);

    return (out-ostart);
}


size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */

    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);

    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
}



/*-**************************************************************
*  Counting histogram
****************************************************************/
/*! FSE_count_simple
    This function counts byte values within `src`, and stores the histogram into table `count`.
    It doesn't use any additional memory.
    But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
    For this reason, prefer using a table `count` with 256 elements.
    @return : count of the most numerous element.
*/
size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                        const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;
    unsigned max=0;

    memset(count, 0, (maxSymbolValue+1)*sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    while (ip<end) {
        assert(*ip <= maxSymbolValue);
        count[*ip++]++;
    }

    while (!count[maxSymbolValue]) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;

    {   U32 s; for (s=0; s<=maxSymbolValue; s++) if (count[s] > max) max = count[s]; }

    return (size_t)max;
}
|
||||
|
||||
|
||||
/* FSE_count_parallel_wksp() :
|
||||
* Same as FSE_count_parallel(), but using an externally provided scratch buffer.
|
||||
* `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`.
|
||||
* @return : largest histogram frequency, or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */
|
||||
static size_t FSE_count_parallel_wksp(
|
||||
unsigned* count, unsigned* maxSymbolValuePtr,
|
||||
const void* source, size_t sourceSize,
|
||||
unsigned checkMax, unsigned* const workSpace)
|
||||
{
|
||||
const BYTE* ip = (const BYTE*)source;
|
||||
const BYTE* const iend = ip+sourceSize;
|
||||
unsigned maxSymbolValue = *maxSymbolValuePtr;
|
||||
unsigned max=0;
|
||||
U32* const Counting1 = workSpace;
|
||||
U32* const Counting2 = Counting1 + 256;
|
||||
U32* const Counting3 = Counting2 + 256;
|
||||
U32* const Counting4 = Counting3 + 256;
|
||||
|
||||
memset(workSpace, 0, 4*256*sizeof(unsigned));
|
||||
|
||||
/* safety checks */
|
||||
if (!sourceSize) {
|
||||
memset(count, 0, maxSymbolValue + 1);
|
||||
*maxSymbolValuePtr = 0;
|
||||
return 0;
|
||||
}
|
||||
if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */
|
||||
|
||||
/* by stripes of 16 bytes */
|
||||
{ U32 cached = MEM_read32(ip); ip += 4;
|
||||
while (ip < iend-15) {
|
||||
U32 c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
c = cached; cached = MEM_read32(ip); ip += 4;
|
||||
Counting1[(BYTE) c ]++;
|
||||
Counting2[(BYTE)(c>>8) ]++;
|
||||
Counting3[(BYTE)(c>>16)]++;
|
||||
Counting4[ c>>24 ]++;
|
||||
}
|
||||
ip-=4;
|
||||
}
|
||||
|
||||
/* finish last symbols */
|
||||
while (ip<iend) Counting1[*ip++]++;
|
||||
|
||||
if (checkMax) { /* verify stats will fit into destination table */
|
||||
U32 s; for (s=255; s>maxSymbolValue; s--) {
|
||||
Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
|
||||
if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
|
||||
} }
|
||||
|
||||
{ U32 s;
|
||||
if (maxSymbolValue > 255) maxSymbolValue = 255;
|
||||
for (s=0; s<=maxSymbolValue; s++) {
|
||||
count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
|
||||
if (count[s] > max) max = count[s];
|
||||
} }
|
||||
|
||||
while (!count[maxSymbolValue]) maxSymbolValue--;
|
||||
*maxSymbolValuePtr = maxSymbolValue;
|
||||
return (size_t)max;
|
||||
}

/* FSE_countFast_wksp() :
 * Same as FSE_countFast(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least `1024` unsigned */
size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                          const void* source, size_t sourceSize,
                          unsigned* workSpace)
{
    if (sourceSize < 1500) /* heuristic threshold */
        return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
    return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
}

/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
                     const void* source, size_t sourceSize)
{
    unsigned tmpCounters[1024];
    return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters);
}

/* FSE_count_wksp() :
 * Same as FSE_count(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least `1024` unsigned */
size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                      const void* source, size_t sourceSize, unsigned* workSpace)
{
    if (*maxSymbolValuePtr < 255)
        return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
    *maxSymbolValuePtr = 255;
    return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
}

size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr,
                 const void* src, size_t srcSize)
{
    unsigned tmpCounters[1024];
    return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters);
}
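
/* Illustrative sketch (added for clarity, not part of upstream zstd) :
 * typical use of the counting API. FSE_count() fills count[0..maxSymbolValue]
 * and lowers *maxSymbolValuePtr to the largest symbol actually present; the
 * returned value is the highest frequency, or an error code. */
#if 0
static void example_count(const void* src, size_t srcSize)
{
    unsigned count[256];
    unsigned maxSymbolValue = 255;   /* in : upper bound ; out : largest symbol seen */
    size_t const maxCount = FSE_count(count, &maxSymbolValue, src, srcSize);
    if (FSE_isError(maxCount)) return;        /* e.g. maxSymbolValue_tooSmall */
    if (maxCount == srcSize) { /* single-symbol input : better handled as RLE */ }
}
#endif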



/*-**************************************************************
*  FSE Compression Code
****************************************************************/
/*! FSE_sizeof_CTable() :
    FSE_CTable is a variable size structure which contains :
    `U16 tableLog;`
    `U16 maxSymbolValue;`
    `U16 nextStateNumber[1 << tableLog];`                         // This size is variable
    `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
    Allocation is manual (C standard does not support variable-size structures).
*/
size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog)
{
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
    return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
}

FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
{
    size_t size;
    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
    return (FSE_CTable*)malloc(size);
}

void FSE_freeCTable (FSE_CTable* ct) { free(ct); }

/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
    U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
    assert(srcSize > 1);   /* Not supported, RLE should be used instead */
    return minBits;
}

unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
    U32 tableLog = maxTableLog;
    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
    assert(srcSize > 1);   /* Not supported, RLE should be used instead */
    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
    return tableLog;
}

unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
}
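
/* Worked example (added) : for srcSize = 4096 and maxSymbolValue = 255,
 * BIT_highbit32(4095) = 11, so maxBitsSrc = 11 - 2 = 9, and
 * FSE_minTableLog() = min(11+1, 7+2) = 9. Starting from the default
 * tableLog (11), accuracy is first reduced to maxBitsSrc = 9, which already
 * satisfies the minimum, so the function returns 9 : a 512-cell table. */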


/* Secondary normalization method.
   To be used when primary method fails. */

static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
{
    short const NOT_YET_ASSIGNED = -2;
    U32 s;
    U32 distributed = 0;
    U32 ToDistribute;

    /* Init */
    U32 const lowThreshold = (U32)(total >> tableLog);
    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));

    for (s=0; s<=maxSymbolValue; s++) {
        if (count[s] == 0) {
            norm[s]=0;
            continue;
        }
        if (count[s] <= lowThreshold) {
            norm[s] = -1;
            distributed++;
            total -= count[s];
            continue;
        }
        if (count[s] <= lowOne) {
            norm[s] = 1;
            distributed++;
            total -= count[s];
            continue;
        }

        norm[s]=NOT_YET_ASSIGNED;
    }
    ToDistribute = (1 << tableLog) - distributed;

    if ((total / ToDistribute) > lowOne) {
        /* risk of rounding to zero */
        lowOne = (U32)((total * 3) / (ToDistribute * 2));
        for (s=0; s<=maxSymbolValue; s++) {
            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
                norm[s] = 1;
                distributed++;
                total -= count[s];
                continue;
        }   }
        ToDistribute = (1 << tableLog) - distributed;
    }

    if (distributed == maxSymbolValue+1) {
        /* all values are pretty poor;
           probably incompressible data (should have already been detected);
           find max, then give all remaining points to max */
        U32 maxV = 0, maxC = 0;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > maxC) { maxV=s; maxC=count[s]; }
        norm[maxV] += (short)ToDistribute;
        return 0;
    }

    if (total == 0) {
        /* all of the symbols were low enough for the lowOne or lowThreshold */
        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
            if (norm[s] > 0) { ToDistribute--; norm[s]++; }
        return 0;
    }

    {   U64 const vStepLog = 62 - tableLog;
        U64 const mid = (1ULL << (vStepLog-1)) - 1;
        U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total;   /* scale on remaining */
        U64 tmpTotal = mid;
        for (s=0; s<=maxSymbolValue; s++) {
            if (norm[s]==NOT_YET_ASSIGNED) {
                U64 const end = tmpTotal + (count[s] * rStep);
                U32 const sStart = (U32)(tmpTotal >> vStepLog);
                U32 const sEnd = (U32)(end >> vStepLog);
                U32 const weight = sEnd - sStart;
                if (weight < 1)
                    return ERROR(GENERIC);
                norm[s] = (short)weight;
                tmpTotal = end;
    }   }   }

    return 0;
}


size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
                           const unsigned* count, size_t total,
                           unsigned maxSymbolValue)
{
    /* Sanity checks */
    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */

    {   static U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
        U64 const scale = 62 - tableLog;
        U64 const step = ((U64)1<<62) / total;   /* <== here, one division ! */
        U64 const vStep = 1ULL<<(scale-20);
        int stillToDistribute = 1<<tableLog;
        unsigned s;
        unsigned largest=0;
        short largestP=0;
        U32 lowThreshold = (U32)(total >> tableLog);

        for (s=0; s<=maxSymbolValue; s++) {
            if (count[s] == total) return 0;   /* rle special case */
            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
            if (count[s] <= lowThreshold) {
                normalizedCounter[s] = -1;
                stillToDistribute--;
            } else {
                short proba = (short)((count[s]*step) >> scale);
                if (proba<8) {
                    U64 restToBeat = vStep * rtbTable[proba];
                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
                }
                if (proba > largestP) { largestP=proba; largest=s; }
                normalizedCounter[s] = proba;
                stillToDistribute -= proba;
        }   }
        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
            /* corner case, need another normalization method */
            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
            if (FSE_isError(errorCode)) return errorCode;
        }
        else normalizedCounter[largest] += (short)stillToDistribute;
    }

#if 0
    {   /* Print Table (debug) */
        U32 s;
        U32 nTotal = 0;
        for (s=0; s<=maxSymbolValue; s++)
            printf("%3i: %4i \n", s, normalizedCounter[s]);
        for (s=0; s<=maxSymbolValue; s++)
            nTotal += abs(normalizedCounter[s]);
        if (nTotal != (1U<<tableLog))
            printf("Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
        getchar();
    }
#endif

    return tableLog;
}
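
/* Illustrative sketch (added, not part of upstream zstd) : turning raw
 * counts into a normalized distribution whose frequencies sum to
 * 1<<tableLog, as required by FSE_buildCTable(). On success,
 * FSE_normalizeCount() returns the tableLog actually used. */
#if 0
static void example_normalize(const unsigned* count, size_t total, unsigned maxSymbolValue)
{
    short norm[FSE_MAX_SYMBOL_VALUE+1];
    unsigned const tableLog = FSE_optimalTableLog(0 /* default */, total, maxSymbolValue);
    size_t const err = FSE_normalizeCount(norm, tableLog, count, total, maxSymbolValue);
    if (FSE_isError(err)) return;
    /* norm[s] now holds the scaled frequency of s ; -1 marks low-probability symbols */
}
#endif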


/* fake FSE_CTable, for raw (uncompressed) input */
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
{
    const unsigned tableSize = 1 << nbBits;
    const unsigned tableMask = tableSize - 1;
    const unsigned maxSymbolValue = tableMask;
    void* const ptr = ct;
    U16* const tableU16 = ( (U16*) ptr) + 2;
    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */
    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
    unsigned s;

    /* Sanity checks */
    if (nbBits < 1) return ERROR(GENERIC);             /* min size */

    /* header */
    tableU16[-2] = (U16) nbBits;
    tableU16[-1] = (U16) maxSymbolValue;

    /* Build table */
    for (s=0; s<tableSize; s++)
        tableU16[s] = (U16)(tableSize + s);

    /* Build Symbol Transformation Table */
    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
        for (s=0; s<=maxSymbolValue; s++) {
            symbolTT[s].deltaNbBits = deltaNbBits;
            symbolTT[s].deltaFindState = s-1;
    }   }

    return 0;
}

/* fake FSE_CTable, for rle input (always same symbol) */
size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
{
    void* ptr = ct;
    U16* tableU16 = ( (U16*) ptr) + 2;
    void* FSCTptr = (U32*)ptr + 2;
    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;

    /* header */
    tableU16[-2] = (U16) 0;
    tableU16[-1] = (U16) symbolValue;

    /* Build table */
    tableU16[0] = 0;
    tableU16[1] = 0;   /* just in case */

    /* Build Symbol Transformation Table */
    symbolTT[symbolValue].deltaNbBits = 0;
    symbolTT[symbolValue].deltaFindState = 0;

    return 0;
}


static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
                           const void* src, size_t srcSize,
                           const FSE_CTable* ct, const unsigned fast)
{
    const BYTE* const istart = (const BYTE*) src;
    const BYTE* const iend = istart + srcSize;
    const BYTE* ip=iend;

    BIT_CStream_t bitC;
    FSE_CState_t CState1, CState2;

    /* init */
    if (srcSize <= 2) return 0;
    {   size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
        if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }

#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))

    if (srcSize & 1) {
        FSE_initCState2(&CState1, ct, *--ip);
        FSE_initCState2(&CState2, ct, *--ip);
        FSE_encodeSymbol(&bitC, &CState1, *--ip);
        FSE_FLUSHBITS(&bitC);
    } else {
        FSE_initCState2(&CState2, ct, *--ip);
        FSE_initCState2(&CState1, ct, *--ip);
    }

    /* join to mod 4 */
    srcSize -= 2;
    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
        FSE_encodeSymbol(&bitC, &CState2, *--ip);
        FSE_encodeSymbol(&bitC, &CState1, *--ip);
        FSE_FLUSHBITS(&bitC);
    }

    /* 2 or 4 encodings per loop */
    while ( ip>istart ) {

        FSE_encodeSymbol(&bitC, &CState2, *--ip);

        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
            FSE_FLUSHBITS(&bitC);

        FSE_encodeSymbol(&bitC, &CState1, *--ip);

        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
            FSE_encodeSymbol(&bitC, &CState2, *--ip);
            FSE_encodeSymbol(&bitC, &CState1, *--ip);
        }

        FSE_FLUSHBITS(&bitC);
    }

    FSE_flushCState(&bitC, &CState2);
    FSE_flushCState(&bitC, &CState1);
    return BIT_closeCStream(&bitC);
}

size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
                           const void* src, size_t srcSize,
                           const FSE_CTable* ct)
{
    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));

    if (fast)
        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
    else
        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
}


size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }

#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }

/* FSE_compress_wksp() :
 * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
 * `wkspSize` must be `(1<<tableLog)`.
 */
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + dstSize;

    U32   count[FSE_MAX_SYMBOL_VALUE+1];
    S16   norm[FSE_MAX_SYMBOL_VALUE+1];
    FSE_CTable* CTable = (FSE_CTable*)workSpace;
    size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
    void* scratchBuffer = (void*)(CTable + CTableSize);
    size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));

    /* init conditions */
    if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
    if (srcSize <= 1) return 0;  /* Not compressible */
    if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
    if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;

    /* Scan input and build symbol stats */
    {   CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
        if (maxCount == srcSize) return 1;   /* only a single symbol in src : rle */
        if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
        if (maxCount < (srcSize >> 7)) return 0;   /* Heuristic : not compressible enough */
    }

    tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
    CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );

    /* Write table description header */
    {   CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
        op += nc_err;
    }

    /* Compress */
    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
        if (cSize == 0) return 0;   /* not enough space for compressed data */
        op += cSize;
    }

    /* check compressibility */
    if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;

    return op-ostart;
}

typedef struct {
    FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
    BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
} fseWkspMax_t;

size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
{
    fseWkspMax_t scratchBuffer;
    FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* a compilation failure here means scratchBuffer is not large enough */
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
    return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
}

size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
}
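
/* Illustrative sketch (added, not part of upstream zstd) : one-shot FSE
 * compression of a memory buffer. Return-value conventions : 0 means "not
 * compressible, nothing written", 1 means "rle, single symbol", and hard
 * errors are reported through FSE_isError(). */
#if 0
static size_t example_fse_compress(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
{
    size_t const cSize = FSE_compress(dst, dstCapacity, src, srcSize);
    if (FSE_isError(cSize)) return cSize;   /* hard error */
    if (cSize <= 1) return 0;               /* not compressible : store raw instead */
    return cSize;
}
#endif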


#endif   /* FSE_COMMONDEFS_ONLY */
309
third/github.com/DataDog/zstd/fse_decompress.c
Normal file
309
third/github.com/DataDog/zstd/fse_decompress.c
Normal file
@ -0,0 +1,309 @@
/* ******************************************************************
   FSE : Finite State Entropy decoder
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */


/* **************************************************************
*  Includes
****************************************************************/
#include <stdlib.h>     /* malloc, free, qsort */
#include <string.h>     /* memcpy, memset */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"


/* **************************************************************
*  Error Management
****************************************************************/
#define FSE_isError ERR_isError
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */

/* check and forward error code */
#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }


/* **************************************************************
*  Templates
****************************************************************/
/*
  designed to be included
  for type-specific functions (template emulation in C)
  Objective is to write these functions only once, for improved maintenance
*/

/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
#  error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
#  error "FSE_FUNCTION_TYPE must be defined"
#endif

/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)


/* Function templates */
FSE_DTable* FSE_createDTable (unsigned tableLog)
{
    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
    return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
}

void FSE_freeDTable (FSE_DTable* dt)
{
    free(dt);
}

size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];

    U32 const maxSV1 = maxSymbolValue + 1;
    U32 const tableSize = 1 << tableLog;
    U32 highThreshold = tableSize-1;

    /* Sanity Checks */
    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);

    /* Init, lay down lowprob symbols */
    {   FSE_DTableHeader DTableH;
        DTableH.tableLog = (U16)tableLog;
        DTableH.fastMode = 1;
        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
            U32 s;
            for (s=0; s<maxSV1; s++) {
                if (normalizedCounter[s]==-1) {
                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
                    symbolNext[s] = 1;
                } else {
                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
                    symbolNext[s] = normalizedCounter[s];
        }   }   }
        memcpy(dt, &DTableH, sizeof(DTableH));
    }

    /* Spread symbols */
    {   U32 const tableMask = tableSize-1;
        U32 const step = FSE_TABLESTEP(tableSize);
        U32 s, position = 0;
        for (s=0; s<maxSV1; s++) {
            int i;
            for (i=0; i<normalizedCounter[s]; i++) {
                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }   }
        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
    }

    /* Build Decoding table */
    {   U32 u;
        for (u=0; u<tableSize; u++) {
            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
            U32 const nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
    }   }

    return 0;
}


#ifndef FSE_COMMONDEFS_ONLY

/*-*******************************************************
*  Decompression (Byte symbols)
*********************************************************/
size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
    void* ptr = dt;
    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
    void* dPtr = dt + 1;
    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;

    DTableH->tableLog = 0;
    DTableH->fastMode = 0;

    cell->newState = 0;
    cell->symbol = symbolValue;
    cell->nbBits = 0;

    return 0;
}


size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
    void* ptr = dt;
    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
    void* dPtr = dt + 1;
    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
    const unsigned tableSize = 1 << nbBits;
    const unsigned tableMask = tableSize - 1;
    const unsigned maxSV1 = tableMask+1;
    unsigned s;

    /* Sanity checks */
    if (nbBits < 1) return ERROR(GENERIC);         /* min size */

    /* Build Decoding Table */
    DTableH->tableLog = (U16)nbBits;
    DTableH->fastMode = 1;
    for (s=0; s<maxSV1; s++) {
        dinfo[s].newState = 0;
        dinfo[s].symbol = (BYTE)s;
        dinfo[s].nbBits = (BYTE)nbBits;
    }

    return 0;
}

FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
          void* dst, size_t maxDstSize,
    const void* cSrc, size_t cSrcSize,
    const FSE_DTable* dt, const unsigned fast)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const omax = op + maxDstSize;
    BYTE* const olimit = omax-3;

    BIT_DStream_t bitD;
    FSE_DState_t state1;
    FSE_DState_t state2;

    /* Init */
    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));

    FSE_initDState(&state1, &bitD, dt);
    FSE_initDState(&state2, &bitD, dt);

#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)

    /* 4 symbols per loop */
    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
        op[0] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[1] = FSE_GETSYMBOL(&state2);

        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }

        op[2] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[3] = FSE_GETSYMBOL(&state2);
    }

    /* tail */
    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
    while (1) {
        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state1);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state2);
            break;
        }

        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state2);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state1);
            break;
    }   }

    return op-ostart;
}


size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
                            const void* cSrc, size_t cSrcSize,
                            const FSE_DTable* dt)
{
    const void* ptr = dt;
    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
    const U32 fastMode = DTableH->fastMode;

    /* select fast mode (static) */
    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}


size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
{
    const BYTE* const istart = (const BYTE*)cSrc;
    const BYTE* ip = istart;
    short counting[FSE_MAX_SYMBOL_VALUE+1];
    unsigned tableLog;
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;

    /* normal FSE decoding mode */
    size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
    if (FSE_isError(NCountLength)) return NCountLength;
    //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
    if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
    ip += NCountLength;
    cSrcSize -= NCountLength;

    CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );

    return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace);   /* always return, even if it is an error code */
}


typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];

size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
{
    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
    return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
}
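
/* Illustrative sketch (added, not part of upstream zstd) : an FSE round
 * trip. The decoder needs the exact original size, which callers must
 * transport out of band (FSE frames do not store it). */
#if 0
static int example_fse_roundtrip(const void* src, size_t srcSize)
{
    BYTE cBuf[FSE_COMPRESSBOUND(1024)];
    BYTE rBuf[1024];
    size_t cSize, rSize;
    if (srcSize > sizeof(rBuf)) return -1;
    cSize = FSE_compress(cBuf, sizeof(cBuf), src, srcSize);
    if (FSE_isError(cSize) || cSize <= 1) return -1;   /* error, rle, or not compressible */
    rSize = FSE_decompress(rBuf, srcSize, cBuf, cSize);
    if (FSE_isError(rSize) || rSize != srcSize) return -1;
    return memcmp(src, rBuf, srcSize);   /* 0 on success */
}
#endif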


#endif   /* FSE_COMMONDEFS_ONLY */
327
third/github.com/DataDog/zstd/huf.h
Normal file
327
third/github.com/DataDog/zstd/huf.h
Normal file
@ -0,0 +1,327 @@
/* ******************************************************************
   Huffman coder, part of New Generation Entropy library
   header file
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef HUF_H_298734234
#define HUF_H_298734234

/* *** Dependencies *** */
#include <stddef.h>    /* size_t */


/* *** library symbols visibility *** */
/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
 *        HUF symbols remain "private" (internal symbols for library only).
 *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
#  define HUF_PUBLIC_API __declspec(dllexport)
#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
#else
#  define HUF_PUBLIC_API
#endif


/* ========================== */
/* ***  simple functions  *** */
/* ========================== */

/** HUF_compress() :
 *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
 *  'dst' buffer must be already allocated.
 *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
 *  `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
 * @return : size of compressed data (<= `dstCapacity`).
 *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
 *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())
 */
HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize);

/** HUF_decompress() :
 *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
 *  into already allocated buffer 'dst', of minimum size 'dstSize'.
 *  `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
 *  Note : in contrast with FSE, HUF_decompress can regenerate
 *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
 *         because it knows size to regenerate (originalSize).
 * @return : size of regenerated data (== originalSize),
 *           or an error code, which can be tested using HUF_isError()
 */
HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
                               const void* cSrc, size_t cSrcSize);
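
/* Illustrative sketch (added, not part of upstream zstd) : the simple
 * one-shot API. The caller must remember srcSize in order to decompress
 * later; HUF_isError() is declared further below in this header. */
#if 0
static void example_huf(const void* src, size_t srcSize, void* dst, size_t dstCapacity)
{
    size_t const cSize = HUF_compress(dst, dstCapacity, src, srcSize);
    if (HUF_isError(cSize) || cSize == 0) return;   /* error, or not compressible */
    /* ... later : regenerate, supplying the exact original size ... */
    /* HUF_decompress(regenBuf, srcSize, dst, cSize); */
}
#endif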


/* ***   Tool functions *** */
#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */
HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */

/* Error Management */
HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */


/* ***   Advanced function   *** */

/** HUF_compress2() :
 *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
 *  `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
 *  `tableLog` must be `<= HUF_TABLELOG_MAX` . */
HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               unsigned maxSymbolValue, unsigned tableLog);

/** HUF_compress4X_wksp() :
 *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
 *  `workSpace` must have a minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
#define HUF_WORKSPACE_SIZE (6 << 10)
#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize,
                                     unsigned maxSymbolValue, unsigned tableLog,
                                     void* workSpace, size_t wkspSize);

#endif   /* HUF_H_298734234 */

/* ******************************************************************
 *  WARNING !!
 *  The following section contains advanced and experimental definitions
 *  which shall never be used in the context of a dynamic library,
 *  because they are not guaranteed to remain stable in the future.
 *  Only consider them in association with static linking.
 * *****************************************************************/
#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
#define HUF_H_HUF_STATIC_LINKING_ONLY

/* *** Dependencies *** */
#include "mem.h"   /* U32 */


/* *** Constants *** */
#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
#define HUF_SYMBOLVALUE_MAX  255

#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
#  error "HUF_TABLELOG_MAX is too large !"
#endif


/* ****************************************
*  Static allocation
******************************************/
/* HUF buffer bounds */
#define HUF_CTABLEBOUND 129
#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* static allocation of HUF's Compression Table */
#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
    U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \
    void* name##hv = &(name##hb); \
    HUF_CElt* name = (HUF_CElt*)(name##hv)   /* no final ; */

/* static allocation of HUF's DTable */
typedef U32 HUF_DTable;
#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
/* ****************************************
*  Advanced decompression functions
******************************************/
size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */

size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< considers RLE and uncompressed as errors */
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< considers RLE and uncompressed as errors */
size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */


/* ****************************************
 *  HUF detailed API
 * ****************************************/

/*! HUF_compress() does the following:
 *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
 *  2. (optional) refine tableLog using HUF_optimalTableLog()
 *  3. build Huffman table from count using HUF_buildCTable()
 *  4. save Huffman table to memory buffer using HUF_writeCTable()
 *  5. encode the data stream using HUF_compress4X_usingCTable()
 *
 *  The following API allows targeting specific sub-functions for advanced tasks.
 *  For example, it's possible to compress several blocks using the same 'CTable',
 *  or to save and regenerate 'CTable' using external methods.
 *  (See the illustrative sketch of this pipeline after the declarations below.)
 */
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
typedef struct HUF_CElt_s HUF_CElt;   /* incomplete type */
size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
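
/* Illustrative sketch (added, not part of upstream zstd) : the 5-step
 * pipeline described above, written out with the detailed API. Assumes
 * "fse.h" is also included (for FSE_count()); error handling and buffer
 * sizing are simplified for illustration. */
#if 0
static size_t example_huf_pipeline(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
{
    unsigned count[HUF_SYMBOLVALUE_MAX+1];
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    HUF_CREATE_STATIC_CTABLE(CTable, HUF_SYMBOLVALUE_MAX);
    BYTE* op = (BYTE*)dst;
    size_t hSize, cSize;
    unsigned tableLog;

    size_t const maxCount = FSE_count(count, &maxSymbolValue, src, srcSize);       /* 1. count */
    if (HUF_isError(maxCount)) return maxCount;
    tableLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue); /* 2. refine */
    tableLog = (unsigned)HUF_buildCTable(CTable, count, maxSymbolValue, tableLog); /* 3. build */
    hSize = HUF_writeCTable(op, dstCapacity, CTable, maxSymbolValue, tableLog);    /* 4. save */
    if (HUF_isError(hSize)) return hSize;
    cSize = HUF_compress4X_usingCTable(op+hSize, dstCapacity-hSize, src, srcSize, CTable);  /* 5. encode */
    if (HUF_isError(cSize) || cSize==0) return 0;   /* not compressible */
    return hSize + cSize;
}
#endif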

typedef enum {
   HUF_repeat_none,  /**< Cannot use the previous table */
   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
 } HUF_repeat;
/** HUF_compress4X_repeat() :
 *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
                       const void* src, size_t srcSize,
                       unsigned maxSymbolValue, unsigned tableLog,
                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);

/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 *  `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
 */
#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize);

/*! HUF_readStats() :
 *  Read compact Huffman tree, saved by HUF_writeCTable().
 *  `huffWeight` is destination buffer.
 * @return : size read from `src` , or an error Code .
 *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize);

/** HUF_readCTable() :
 *  Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);


/*
 * HUF_decompress() does the following:
 * 1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
 * 2. build Huffman table from save, using HUF_readDTableX?()
 * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
 */

/** HUF_selectDecoder() :
 *  Tells which decoder is likely to decode faster,
 *  based on a set of pre-computed metrics.
 * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
 *  Assumption : 0 < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);

/**
 *  The minimum workspace size for the `workSpace` used in
 *  HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp().
 *
 *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
 *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
 *  Buffer overflow errors may potentially occur if code modifications result in
 *  a required workspace size greater than that specified in the following
 *  macro.
 */
#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))

size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX4_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);

size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);


/* ====================== */
/* single stream variants */
/* ====================== */

size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
/** HUF_compress1X_repeat() :
 *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
                       const void* src, size_t srcSize,
                       unsigned maxSymbolValue, unsigned tableLog,
                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);

size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */

size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */

size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);

/* BMI2 variants.
 * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
 */
size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);

#endif /* HUF_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
}
#endif
788
third/github.com/DataDog/zstd/huf_compress.c
Normal file
788
third/github.com/DataDog/zstd/huf_compress.c
Normal file
@ -0,0 +1,788 @@
/* ******************************************************************
   Huffman encoder, part of New Generation Entropy library
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

/* **************************************************************
*  Compiler specifics
****************************************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#endif


/* **************************************************************
*  Includes
****************************************************************/
#include <string.h>     /* memcpy, memset */
#include <stdio.h>      /* printf (debug) */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
#include "fse.h"        /* header compression */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "error_private.h"


/* **************************************************************
*  Error Management
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }


/* **************************************************************
*  Utils
****************************************************************/
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
}


/* *******************************************************
*  HUF : Huffman block compression
*********************************************************/
/* HUF_compressWeights() :
 * Same as FSE_compress(), but dedicated to huff0's weights compression.
 * The use case needs much less stack memory.
 * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
 */
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + dstSize;

    U32 maxSymbolValue = HUF_TABLELOG_MAX;
    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;

    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
    BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];

    U32 count[HUF_TABLELOG_MAX+1];
    S16 norm[HUF_TABLELOG_MAX+1];

    /* init conditions */
    if (wtSize <= 1) return 0;  /* Not compressible */

    /* Scan input and build symbol stats */
    {   CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
    }

    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
    CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );

    /* Write table description header */
    {   CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
        op += hSize;
    }

    /* Compress */
    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
        if (cSize == 0) return 0;   /* not enough space for compressed data */
        op += cSize;
    }

    return op-ostart;
}


struct HUF_CElt_s {
    U16  val;
    BYTE nbBits;
};   /* typedef'd to HUF_CElt within "huf.h" */

/*! HUF_writeCTable() :
    `CTable` : Huffman tree to save, using huf representation.
    @return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
                        const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
{
    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
    BYTE* op = (BYTE*)dst;
    U32 n;

    /* check conditions */
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);

    /* convert to weight */
    bitsToWeight[0] = 0;
    for (n=1; n<huffLog+1; n++)
        bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
    for (n=0; n<maxSymbolValue; n++)
        huffWeight[n] = bitsToWeight[CTable[n].nbBits];

    /* attempt weights compression by FSE */
    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
            op[0] = (BYTE)hSize;
            return hSize+1;
    }   }

    /* write raw values as 4-bits (max : 15) */
    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
    huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
    for (n=0; n<maxSymbolValue; n+=2)
        op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
    return ((maxSymbolValue+1)/2) + 1;
}
|
||||
|
||||
|
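/* HUF_readCTable() :
 * Rebuilds a compression table from the header written by HUF_writeCTable() :
 * a first byte < 128 announces FSE-compressed weights of that many bytes,
 * while a first byte >= 128 encodes 128 + (nbSymbols-1) followed by raw
 * 4-bit weights. Codes are then re-derived in canonical order. */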
size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize)
{
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];  /* large enough for values from 0 to 16 */
    U32 tableLog = 0;
    U32 nbSymbols = 0;

    /* get symbol weights */
    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));

    /* check result */
    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);

    /* Prepare base value per rank */
    {   U32 n, nextRankStart = 0;
        for (n=1; n<=tableLog; n++) {
            U32 current = nextRankStart;
            nextRankStart += (rankVal[n] << (n-1));
            rankVal[n] = current;
    }   }

    /* fill nbBits */
    {   U32 n; for (n=0; n<nbSymbols; n++) {
            const U32 w = huffWeight[n];
            CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
    }   }

    /* fill val */
    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */
        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
        /* determine starting value per rank */
        valPerRank[tableLog+1] = 0;   /* for w==0 */
        {   U16 min = 0;
            U32 n; for (n=tableLog; n>0; n--) {  /* start at n=tablelog <-> w=1 */
                valPerRank[n] = min;     /* get starting value within each rank */
                min += nbPerRank[n];
                min >>= 1;
        }   }
        /* assign value within rank, symbol order */
        { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
    }

    *maxSymbolValuePtr = nbSymbols - 1;
    return readSize;
}


typedef struct nodeElt_s {
    U32 count;
    U16 parent;
    BYTE byte;
    BYTE nbBits;
} nodeElt;

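/* HUF_setMaxHeight() :
 * Enforces maxNbBits on the tree described in huffNode (sorted by decreasing
 * count). Code lengths above maxNbBits are truncated, which creates a "debt"
 * against the Kraft inequality; the debt is then repaid by lengthening the
 * codes of selected shorter symbols, preferring the cheapest ranks first.
 * @return : the resulting max number of bits (<= maxNbBits) */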
static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
{
    const U32 largestBits = huffNode[lastNonNull].nbBits;
    if (largestBits <= maxNbBits) return largestBits;   /* early exit : no elt > maxNbBits */

    /* there are several too large elements (at least >= 2) */
    {   int totalCost = 0;
        const U32 baseCost = 1 << (largestBits - maxNbBits);
        U32 n = lastNonNull;

        while (huffNode[n].nbBits > maxNbBits) {
            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
            huffNode[n].nbBits = (BYTE)maxNbBits;
            n --;
        }  /* n stops at huffNode[n].nbBits <= maxNbBits */
        while (huffNode[n].nbBits == maxNbBits) n--;   /* n ends at index of smallest symbol using < maxNbBits */

        /* renorm totalCost */
        totalCost >>= (largestBits - maxNbBits);  /* note : totalCost is necessarily a multiple of baseCost */

        /* repay normalized cost */
        {   U32 const noSymbol = 0xF0F0F0F0;
            U32 rankLast[HUF_TABLELOG_MAX+2];
            int pos;

            /* Get pos of last (smallest) symbol per rank */
            memset(rankLast, 0xF0, sizeof(rankLast));
            {   U32 currentNbBits = maxNbBits;
                for (pos=n ; pos >= 0; pos--) {
                    if (huffNode[pos].nbBits >= currentNbBits) continue;
                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
                    rankLast[maxNbBits-currentNbBits] = pos;
            }   }

            while (totalCost > 0) {
                U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
                    U32 highPos = rankLast[nBitsToDecrease];
                    U32 lowPos = rankLast[nBitsToDecrease-1];
                    if (highPos == noSymbol) continue;
                    if (lowPos == noSymbol) break;
                    {   U32 const highTotal = huffNode[highPos].count;
                        U32 const lowTotal = 2 * huffNode[lowPos].count;
                        if (highTotal <= lowTotal) break;
                }   }
                /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
                /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
                    nBitsToDecrease ++;
                totalCost -= 1 << (nBitsToDecrease-1);
                if (rankLast[nBitsToDecrease-1] == noSymbol)
                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];   /* this rank is no longer empty */
                huffNode[rankLast[nBitsToDecrease]].nbBits ++;
                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
                    rankLast[nBitsToDecrease] = noSymbol;
                else {
                    rankLast[nBitsToDecrease]--;
                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
            }   }   /* while (totalCost > 0) */

            while (totalCost < 0) {  /* Sometimes, cost correction overshoots */
                if (rankLast[1] == noSymbol) {  /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
                    while (huffNode[n].nbBits == maxNbBits) n--;
                    huffNode[n+1].nbBits--;
                    rankLast[1] = n+1;
                    totalCost++;
                    continue;
                }
                huffNode[ rankLast[1] + 1 ].nbBits--;
                rankLast[1]++;
                totalCost ++;
    }   }   }   /* there are several too large elements (at least >= 2) */

    return maxNbBits;
}


typedef struct {
    U32 base;
    U32 current;
} rankPos;

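/* HUF_sort() :
 * Sorts symbols into huffNode by decreasing count, using a bucket pass
 * keyed on BIT_highbit32(count+1) followed by an insertion step within
 * each bucket, so full sorting work is confined to symbols sharing the
 * same count magnitude. */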
static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
{
    rankPos rank[32];
    U32 n;

    memset(rank, 0, sizeof(rank));
    for (n=0; n<=maxSymbolValue; n++) {
        U32 r = BIT_highbit32(count[n] + 1);
        rank[r].base ++;
    }
    for (n=30; n>0; n--) rank[n-1].base += rank[n].base;
    for (n=0; n<32; n++) rank[n].current = rank[n].base;
    for (n=0; n<=maxSymbolValue; n++) {
        U32 const c = count[n];
        U32 const r = BIT_highbit32(c+1) + 1;
        U32 pos = rank[r].current++;
        while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) {
            huffNode[pos] = huffNode[pos-1];
            pos--;
        }
        huffNode[pos].count = c;
        huffNode[pos].byte  = (BYTE)n;
    }
}


/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of HUF_CTABLE_WORKSPACE_SIZE_U32 unsigned.
 */
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
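/* Construction note : huffNode is used as two implicit queues. Leaves sit in
 * huffNode[0..nonNullRank] sorted by decreasing count, internal nodes are
 * appended from STARTNODE upward, and huffNode0[0] holds a fake maximal
 * count acting as a sentinel, so each new parent can pick its two smallest
 * available children without bounds checks. */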
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
{
    nodeElt* const huffNode0 = (nodeElt*)workSpace;
    nodeElt* const huffNode = huffNode0+1;
    U32 n, nonNullRank;
    int lowS, lowN;
    U16 nodeNb = STARTNODE;
    U32 nodeRoot;

    /* safety checks */
    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (wkspSize < sizeof(huffNodeTable)) return ERROR(workSpace_tooSmall);
    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
    memset(huffNode0, 0, sizeof(huffNodeTable));

    /* sort, decreasing order */
    HUF_sort(huffNode, count, maxSymbolValue);

    /* init for parents */
    nonNullRank = maxSymbolValue;
    while(huffNode[nonNullRank].count == 0) nonNullRank--;
    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
    huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;
    nodeNb++; lowS-=2;
    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */

    /* create parents */
    while (nodeNb <= nodeRoot) {
        U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
        huffNode[n1].parent = huffNode[n2].parent = nodeNb;
        nodeNb++;
    }

    /* distribute weights (unlimited tree height) */
    huffNode[nodeRoot].nbBits = 0;
    for (n=nodeRoot-1; n>=STARTNODE; n--)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
    for (n=0; n<=nonNullRank; n++)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;

    /* enforce maxTableLog */
    maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);

    /* fill result into tree (val, nbBits) */
    {   U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
        U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
        if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
        for (n=0; n<=nonNullRank; n++)
            nbPerRank[huffNode[n].nbBits]++;
        /* determine starting value per rank */
        {   U16 min = 0;
            for (n=maxNbBits; n>0; n--) {
                valPerRank[n] = min;      /* get starting value within each rank */
                min += nbPerRank[n];
                min >>= 1;
        }   }
        for (n=0; n<=maxSymbolValue; n++)
            tree[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
        for (n=0; n<=maxSymbolValue; n++)
            tree[n].val = valPerRank[tree[n].nbBits]++;   /* assign value within rank, symbol order */
    }

    return maxNbBits;
}

/** HUF_buildCTable() :
 * @return : maxNbBits
 * Note : count is used before tree is written, so they can safely overlap
 */
size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
{
    huffNodeTable nodeTable;
    return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
}

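/* HUF_estimateCompressedSize() :
 * Estimated output size, in bytes, when coding `count` with `CTable` :
 * sum(nbBits[s] * count[s]) / 8, ignoring the table header. Used later to
 * decide whether re-using the previous table beats building a new one.
 * HUF_validateCTable() checks that every present symbol has a code. */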
static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
    size_t nbBits = 0;
    int s;
    for (s = 0; s <= (int)maxSymbolValue; ++s) {
        nbBits += CTable[s].nbBits * count[s];
    }
    return nbBits >> 3;
}

static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
    int bad = 0;
    int s;
    for (s = 0; s <= (int)maxSymbolValue; ++s) {
        bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
    }
    return !bad;
}

size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }

FORCE_INLINE_TEMPLATE void
HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
{
    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
}

#define HUF_FLUSHBITS(s)  BIT_flushBits(s)

#define HUF_FLUSHBITS_1(stream) \
    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)

#define HUF_FLUSHBITS_2(stream) \
    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)

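/* Flushing note : HUF_FLUSHBITS_1/_2 compile down to either a flush or a
 * no-op, depending on whether the bit container can safely accumulate 2 or 4
 * worst-case symbols (HUF_TABLELOG_MAX bits each) between flushes. With a
 * 64-bit container both conditions are false, so the main loop below encodes
 * 4 symbols per iteration with a single flush at the end of the group. */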
FORCE_INLINE_TEMPLATE size_t
HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
                                   const void* src, size_t srcSize,
                                   const HUF_CElt* CTable)
{
    const BYTE* ip = (const BYTE*) src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;
    size_t n;
    BIT_CStream_t bitC;

    /* init */
    if (dstSize < 8) return 0;   /* not enough space to compress */
    { size_t const initErr = BIT_initCStream(&bitC, op, oend-op);
      if (HUF_isError(initErr)) return 0; }

    n = srcSize & ~3;  /* join to mod 4 */
    switch (srcSize & 3)
    {
        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
                 HUF_FLUSHBITS_2(&bitC);
                 /* fall-through */
        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
                 HUF_FLUSHBITS_1(&bitC);
                 /* fall-through */
        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
                 HUF_FLUSHBITS(&bitC);
                 /* fall-through */
        case 0 : /* fall-through */
        default: break;
    }

    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */
        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
        HUF_FLUSHBITS_1(&bitC);
        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
        HUF_FLUSHBITS_2(&bitC);
        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
        HUF_FLUSHBITS_1(&bitC);
        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
        HUF_FLUSHBITS(&bitC);
    }

    return BIT_closeCStream(&bitC);
}

#if DYNAMIC_BMI2

static TARGET_ATTRIBUTE("bmi2") size_t
HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
                                   const void* src, size_t srcSize,
                                   const HUF_CElt* CTable)
{
    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}

static size_t
HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
                                      const void* src, size_t srcSize,
                                      const HUF_CElt* CTable)
{
    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}

static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              const HUF_CElt* CTable, const int bmi2)
{
    if (bmi2) {
        return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
    }
    return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
}

#else

static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              const HUF_CElt* CTable, const int bmi2)
{
    (void)bmi2;
    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}

#endif

size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
}


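/* HUF_compress4X_usingCTable_internal() :
 * Splits the input into 4 segments compressed as independent streams
 * (enabling 4-way decoding). The output starts with a 6-byte jump table :
 * three little-endian U16 values giving the compressed sizes of the first
 * three streams; the fourth stream's size is deduced from the total. */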
static size_t
HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              const HUF_CElt* CTable, int bmi2)
{
    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
    const BYTE* ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*) dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;

    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
    if (srcSize < 12) return 0;   /* no saving possible : too small input */
    op += 6;   /* jumpTable */

    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
        if (cSize==0) return 0;
        assert(cSize <= 65535);
        MEM_writeLE16(ostart, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
        if (cSize==0) return 0;
        assert(cSize <= 65535);
        MEM_writeLE16(ostart+2, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
        if (cSize==0) return 0;
        assert(cSize <= 65535);
        MEM_writeLE16(ostart+4, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, iend-ip, CTable, bmi2) );
        if (cSize==0) return 0;
        op += cSize;
    }

    return op-ostart;
}

size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
}


static size_t HUF_compressCTable_internal(
                BYTE* const ostart, BYTE* op, BYTE* const oend,
                const void* src, size_t srcSize,
                unsigned singleStream, const HUF_CElt* CTable, const int bmi2)
{
    size_t const cSize = singleStream ?
                         HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) :
                         HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2);
    if (HUF_isError(cSize)) { return cSize; }
    if (cSize==0) { return 0; }   /* not compressible */
    op += cSize;
    /* check compressibility */
    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
    return op-ostart;
}

typedef struct {
    U32 count[HUF_SYMBOLVALUE_MAX + 1];
    HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
    huffNodeTable nodeTable;
} HUF_compress_tables_t;

/* HUF_compress_internal() :
 * `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
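/* Decision flow : (1) optionally reuse a validated previous table for small
 * inputs (preferRepeat), (2) gather symbol stats and bail out early on RLE
 * or poorly compressible data, (3) build a fresh table, then keep whichever
 * of old vs new table yields the smaller estimated output, counting the
 * header cost of the new table. */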
static size_t HUF_compress_internal (
                void* dst, size_t dstSize,
                const void* src, size_t srcSize,
                unsigned maxSymbolValue, unsigned huffLog,
                unsigned singleStream,
                void* workSpace, size_t wkspSize,
                HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
                const int bmi2)
{
    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;

    /* checks & inits */
    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
    if (!srcSize) return 0;  /* Uncompressed */
    if (!dstSize) return 0;  /* cannot fit anything within dst budget */
    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;

    /* Heuristic : If old table is valid, use it for small inputs */
    if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
        return HUF_compressCTable_internal(ostart, op, oend,
                                           src, srcSize,
                                           singleStream, oldHufTable, bmi2);
    }

    /* Scan input and build symbol stats */
    {   CHECK_V_F(largest, FSE_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->count) );
        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
        if (largest <= (srcSize >> 7)+1) return 0;   /* heuristic : probably not compressible enough */
    }

    /* Check validity of previous table */
    if ( repeat
      && *repeat == HUF_repeat_check
      && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
        *repeat = HUF_repeat_none;
    }
    /* Heuristic : use existing table for small inputs */
    if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
        return HUF_compressCTable_internal(ostart, op, oend,
                                           src, srcSize,
                                           singleStream, oldHufTable, bmi2);
    }

    /* Build Huffman Tree */
    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
    {   CHECK_V_F(maxBits, HUF_buildCTable_wksp(table->CTable, table->count,
                                                maxSymbolValue, huffLog,
                                                table->nodeTable, sizeof(table->nodeTable)) );
        huffLog = (U32)maxBits;
        /* Zero unused symbols in CTable, so we can check it for validity */
        memset(table->CTable + (maxSymbolValue + 1), 0,
               sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
    }

    /* Write table description header */
    {   CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) );
        /* Check if using previous huffman table is beneficial */
        if (repeat && *repeat != HUF_repeat_none) {
            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
            size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
                return HUF_compressCTable_internal(ostart, op, oend,
                                                   src, srcSize,
                                                   singleStream, oldHufTable, bmi2);
        }   }

        /* Use the new huffman table */
        if (hSize + 12ul >= srcSize) { return 0; }
        op += hSize;
        if (repeat) { *repeat = HUF_repeat_none; }
        if (oldHufTable)
            memcpy(oldHufTable, table->CTable, sizeof(table->CTable));  /* Save new table */
    }
    return HUF_compressCTable_internal(ostart, op, oend,
                                       src, srcSize,
                                       singleStream, table->CTable, bmi2);
}


size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize,
                                 maxSymbolValue, huffLog, 1 /*single stream*/,
                                 workSpace, wkspSize,
                                 NULL, NULL, 0, 0 /*bmi2*/);
}

size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize,
                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize,
                                 maxSymbolValue, huffLog, 1 /*single stream*/,
                                 workSpace, wkspSize, hufTable,
                                 repeat, preferRepeat, bmi2);
}

size_t HUF_compress1X (void* dst, size_t dstSize,
                 const void* src, size_t srcSize,
                 unsigned maxSymbolValue, unsigned huffLog)
{
    unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
    return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
}

/* HUF_compress4X_wksp():
 * compress input using 4 streams.
 * provide workspace to generate compression tables */
size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize,
                                 maxSymbolValue, huffLog, 0 /*4 streams*/,
                                 workSpace, wkspSize,
                                 NULL, NULL, 0, 0 /*bmi2*/);
}

/* HUF_compress4X_repeat():
 * compress input using 4 streams.
 * re-use an existing huffman compression table */
size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize,
                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
{
    return HUF_compress_internal(dst, dstSize, src, srcSize,
                                 maxSymbolValue, huffLog, 0 /* 4 streams */,
                                 workSpace, wkspSize,
                                 hufTable, repeat, preferRepeat, bmi2);
}

size_t HUF_compress2 (void* dst, size_t dstSize,
                const void* src, size_t srcSize,
                unsigned maxSymbolValue, unsigned huffLog)
{
    unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
    return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
}

size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
}
1096
third/github.com/DataDog/zstd/huf_decompress.c
Normal file
File diff suppressed because it is too large
362
third/github.com/DataDog/zstd/mem.h
Normal file
@ -0,0 +1,362 @@
|
||||
/*
|
||||
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
#ifndef MEM_H_MODULE
|
||||
#define MEM_H_MODULE
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*-****************************************
|
||||
* Dependencies
|
||||
******************************************/
|
||||
#include <stddef.h> /* size_t, ptrdiff_t */
|
||||
#include <string.h> /* memcpy */
|
||||
|
||||
|
||||
/*-****************************************
|
||||
* Compiler specifics
|
||||
******************************************/
|
||||
#if defined(_MSC_VER) /* Visual Studio */
|
||||
# include <stdlib.h> /* _byteswap_ulong */
|
||||
# include <intrin.h> /* _byteswap_* */
|
||||
#endif
|
||||
#if defined(__GNUC__)
|
||||
# define MEM_STATIC static __inline __attribute__((unused))
|
||||
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
|
||||
# define MEM_STATIC static inline
|
||||
#elif defined(_MSC_VER)
|
||||
# define MEM_STATIC static __inline
|
||||
#else
|
||||
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
|
||||
#endif
|
||||
|
||||
/* code only tested on 32 and 64 bits systems */
|
||||
#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
|
||||
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
|


/*-**************************************************************
*  Basic Types
*****************************************************************/
#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef   uint8_t BYTE;
  typedef  uint16_t U16;
  typedef   int16_t S16;
  typedef  uint32_t U32;
  typedef   int32_t S32;
  typedef  uint64_t U64;
  typedef   int64_t S64;
#else
  typedef unsigned char      BYTE;
  typedef unsigned short     U16;
  typedef   signed short     S16;
  typedef unsigned int       U32;
  typedef   signed int       S32;
  typedef unsigned long long U64;
  typedef   signed long long S64;
#endif


/*-**************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on compiler extensions (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets depending on alignment.
 *            In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define MEM_FORCE_MEMORY_ACCESS 2
#  elif defined(__INTEL_COMPILER) || defined(__GNUC__)
#    define MEM_FORCE_MEMORY_ACCESS 1
#  endif
#endif

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates C standard, by lying on structure alignment.
   Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
    __pragma( pack(push, 1) )
    typedef struct { U16 v; } unalign16;
    typedef struct { U32 v; } unalign32;
    typedef struct { U64 v; } unalign64;
    typedef struct { size_t v; } unalignArch;
    __pragma( pack(pop) )
#else
    typedef struct { U16 v; } __attribute__((packed)) unalign16;
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    typedef struct { U64 v; } __attribute__((packed)) unalign64;
    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }

#else

/* default method, safe and standard.
   can sometimes prove slower */

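/* Note : most modern compilers recognize these fixed-size memcpy() calls and
 * compile them down to a single (unaligned) load or store, so the portable
 * default is typically as fast as the direct-access variants on mainstream
 * targets. */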
MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */

MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_ulong(in);
#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
    return __builtin_bswap32(in);
#else
    return  ((in << 24) & 0xff000000 ) |
            ((in <<  8) & 0x00ff0000 ) |
            ((in >>  8) & 0x0000ff00 ) |
            ((in >> 24) & 0x000000ff );
#endif
}

MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_uint64(in);
#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
    return __builtin_bswap64(in);
#else
    return  ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in <<  8) & 0x000000ff00000000ULL) |
            ((in >>  8) & 0x00000000ff000000ULL) |
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
#endif
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}

/*=== Little endian r/w ===*/

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeLE32(memPtr, (U32)val);
    else
        MEM_writeLE64(memPtr, (U64)val);
}

/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */
283
third/github.com/DataDog/zstd/pool.c
Normal file
@ -0,0 +1,283 @@
|
||||
/*
|
||||
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
|
||||
/* ====== Dependencies ======= */
|
||||
#include <stddef.h> /* size_t */
|
||||
#include "pool.h"
|
||||
#include "zstd_internal.h" /* ZSTD_malloc, ZSTD_free */
|
||||
|
||||
/* ====== Compiler specifics ====== */
|
||||
#if defined(_MSC_VER)
|
||||
# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef ZSTD_MULTITHREAD
|
||||
|
||||
#include "threading.h" /* pthread adaptation */
|
||||
|
||||
/* A job is a function and an opaque argument */
|
||||
typedef struct POOL_job_s {
|
||||
POOL_function function;
|
||||
void *opaque;
|
||||
} POOL_job;
|
||||
|
||||
struct POOL_ctx_s {
|
||||
ZSTD_customMem customMem;
|
||||
/* Keep track of the threads */
|
||||
ZSTD_pthread_t *threads;
|
||||
size_t numThreads;
|
||||
|
||||
/* The queue is a circular buffer */
|
||||
POOL_job *queue;
|
||||
size_t queueHead;
|
||||
size_t queueTail;
|
||||
size_t queueSize;
|
||||
|
||||
/* The number of threads working on jobs */
|
||||
size_t numThreadsBusy;
|
||||
/* Indicates if the queue is empty */
|
||||
int queueEmpty;
|
||||
|
||||
/* The mutex protects the queue */
|
||||
ZSTD_pthread_mutex_t queueMutex;
|
||||
/* Condition variable for pushers to wait on when the queue is full */
|
||||
ZSTD_pthread_cond_t queuePushCond;
|
||||
/* Condition variables for poppers to wait on when the queue is empty */
|
||||
ZSTD_pthread_cond_t queuePopCond;
|
||||
/* Indicates if the queue is shutting down */
|
||||
int shutdown;
|
||||
};
|
||||
|
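/* Invariant note : with a circular buffer, queueHead == queueTail is
 * ambiguous between "empty" and "full", so the explicit queueEmpty flag
 * disambiguates and one slot is deliberately wasted (see POOL_create,
 * which allocates queueSize + 1 entries). */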
/* POOL_thread() :
   Work thread for the thread pool.
   Waits for jobs and executes them.
   @returns : NULL on failure else non-null.
*/
static void* POOL_thread(void* opaque) {
    POOL_ctx* const ctx = (POOL_ctx*)opaque;
    if (!ctx) { return NULL; }
    for (;;) {
        /* Lock the mutex and wait for a non-empty queue or until shutdown */
        ZSTD_pthread_mutex_lock(&ctx->queueMutex);

        while (ctx->queueEmpty && !ctx->shutdown) {
            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
        }
        /* empty => shutting down: so stop */
        if (ctx->queueEmpty) {
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
            return opaque;
        }
        /* Pop a job off the queue */
        {   POOL_job const job = ctx->queue[ctx->queueHead];
            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
            ctx->numThreadsBusy++;
            ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
            /* Unlock the mutex, signal a pusher, and run the job */
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
            ZSTD_pthread_cond_signal(&ctx->queuePushCond);

            job.function(job.opaque);

            /* If the intended queue size was 0, signal after finishing job */
            if (ctx->queueSize == 1) {
                ZSTD_pthread_mutex_lock(&ctx->queueMutex);
                ctx->numThreadsBusy--;
                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                ZSTD_pthread_cond_signal(&ctx->queuePushCond);
        }   }
    }  /* for (;;) */
    /* Unreachable */
}

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
    POOL_ctx* ctx;
    /* Check the parameters */
    if (!numThreads) { return NULL; }
    /* Allocate the context and zero initialize */
    ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
    if (!ctx) { return NULL; }
    /* Initialize the job queue.
     * It needs one extra space since one space is wasted to differentiate empty
     * and full queues.
     */
    ctx->queueSize = queueSize + 1;
    ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
    ctx->queueHead = 0;
    ctx->queueTail = 0;
    ctx->numThreadsBusy = 0;
    ctx->queueEmpty = 1;
    (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
    (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
    (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
    ctx->shutdown = 0;
    /* Allocate space for the thread handles */
    ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
    ctx->numThreads = 0;
    ctx->customMem = customMem;
    /* Check for errors */
    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
    /* Initialize the threads */
    {   size_t i;
        for (i = 0; i < numThreads; ++i) {
            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
                ctx->numThreads = i;
                POOL_free(ctx);
                return NULL;
        }   }
        ctx->numThreads = numThreads;
    }
    return ctx;
}


/*! POOL_join() :
    Shutdown the queue, wake any sleeping threads, and join all of the threads.
*/
static void POOL_join(POOL_ctx* ctx) {
    /* Shut down the queue */
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    ctx->shutdown = 1;
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    /* Wake up sleeping threads */
    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    /* Join all of the threads */
    {   size_t i;
        for (i = 0; i < ctx->numThreads; ++i) {
            ZSTD_pthread_join(ctx->threads[i], NULL);
    }   }
}

void POOL_free(POOL_ctx *ctx) {
    if (!ctx) { return; }
    POOL_join(ctx);
    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
    ZSTD_free(ctx->queue, ctx->customMem);
    ZSTD_free(ctx->threads, ctx->customMem);
    ZSTD_free(ctx, ctx->customMem);
}

size_t POOL_sizeof(POOL_ctx *ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    return sizeof(*ctx)
        + ctx->queueSize * sizeof(POOL_job)
        + ctx->numThreads * sizeof(ZSTD_pthread_t);
}

/**
 * Returns 1 if the queue is full and 0 otherwise.
 *
 * If the queueSize is 1 (the pool was created with an intended queueSize of 0),
 * then a queue is empty if there is a thread free and no job is waiting.
 */
static int isQueueFull(POOL_ctx const* ctx) {
    if (ctx->queueSize > 1) {
        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
    } else {
        return ctx->numThreadsBusy == ctx->numThreads ||
               !ctx->queueEmpty;
    }
}


static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
{
    POOL_job const job = {function, opaque};
    assert(ctx != NULL);
    if (ctx->shutdown) return;

    ctx->queueEmpty = 0;
    ctx->queue[ctx->queueTail] = job;
    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
}

void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    /* Wait until there is space in the queue for the new job */
    while (isQueueFull(ctx) && (!ctx->shutdown)) {
        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}


int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    if (isQueueFull(ctx)) {
        ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        return 0;
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return 1;
}


#else  /* ZSTD_MULTITHREAD not defined */

/* ========================== */
/* No multi-threading support */
/* ========================== */


/* We don't need any data, but if it is empty, malloc() might return NULL. */
struct POOL_ctx_s {
    int dummy;
};
static POOL_ctx g_ctx;

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
    (void)numThreads;
    (void)queueSize;
    (void)customMem;
    return &g_ctx;
}

void POOL_free(POOL_ctx* ctx) {
    assert(!ctx || ctx == &g_ctx);
    (void)ctx;
}

void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
}

int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
    return 1;
}

size_t POOL_sizeof(POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    assert(ctx == &g_ctx);
    return sizeof(*ctx);
}

#endif  /* ZSTD_MULTITHREAD */
74
third/github.com/DataDog/zstd/pool.h
Normal file
@ -0,0 +1,74 @@
|
||||
/*
|
||||
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
#ifndef POOL_H
|
||||
#define POOL_H
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
#include <stddef.h> /* size_t */
|
||||
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */
|
||||
#include "zstd.h"
|
||||
|
||||
typedef struct POOL_ctx_s POOL_ctx;
|
||||
|
||||
/*! POOL_create() :
|
||||
* Create a thread pool with at most `numThreads` threads.
|
||||
* `numThreads` must be at least 1.
|
||||
* The maximum number of queued jobs before blocking is `queueSize`.
|
||||
* @return : POOL_ctx pointer on success, else NULL.
|
||||
*/
|
||||
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
|
||||
|
||||
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);
|
||||
|
||||
/*! POOL_free() :
|
||||
Free a thread pool returned by POOL_create().
|
||||
*/
|
||||
void POOL_free(POOL_ctx* ctx);
|
||||
|
||||
/*! POOL_sizeof() :
|
||||
return memory usage of pool returned by POOL_create().
|
||||
*/
|
||||
size_t POOL_sizeof(POOL_ctx* ctx);
|
||||
|
||||
/*! POOL_function :
|
||||
The function type that can be added to a thread pool.
|
||||
*/
|
||||
typedef void (*POOL_function)(void*);
|
||||
/*! POOL_add_function :
|
||||
The function type for a generic thread pool add function.
|
||||
*/
|
||||
typedef void (*POOL_add_function)(void*, POOL_function, void*);
|
||||
|
||||
/*! POOL_add() :
|
||||
Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
|
||||
Possibly blocks until there is room in the queue.
|
||||
Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
|
||||
*/
|
||||
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
|
||||
|
||||
|
||||
/*! POOL_tryAdd() :
|
||||
Add the job `function(opaque)` to the thread pool if a worker is available.
|
||||
return immediately otherwise.
|
||||
@return : 1 if successful, 0 if not.
|
||||
*/
|
||||
int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
|
||||
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
75
third/github.com/DataDog/zstd/threading.c
Normal file
@ -0,0 +1,75 @@
|
||||
/**
|
||||
* Copyright (c) 2016 Tino Reichardt
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
*
|
||||
* You can contact the author at:
|
||||
* - zstdmt source repository: https://github.com/mcmilk/zstdmt
|
||||
*/
|
||||
|
||||
/**
|
||||
* This file will hold wrapper for systems, which do not support pthreads
|
||||
*/
|
||||
|
||||
/* create fake symbol to avoid empty trnaslation unit warning */
|
||||
int g_ZSTD_threading_useles_symbol;
|
||||
|
||||
#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
|
||||
|
||||
/**
|
||||
* Windows minimalist Pthread Wrapper, based on :
|
||||
* http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
|
||||
*/
|
||||
|
||||
|
||||
/* === Dependencies === */
|
||||
#include <process.h>
|
||||
#include <errno.h>
|
||||
#include "threading.h"
|
||||
|
||||
|
||||
/* === Implementation === */
|
||||
|
||||
static unsigned __stdcall worker(void *arg)
|
||||
{
|
||||
ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
|
||||
thread->arg = thread->start_routine(thread->arg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
|
||||
void* (*start_routine) (void*), void* arg)
|
||||
{
|
||||
(void)unused;
|
||||
thread->arg = arg;
|
||||
thread->start_routine = start_routine;
|
||||
thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);
|
||||
|
||||
if (!thread->handle)
|
||||
return errno;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
|
||||
{
|
||||
DWORD result;
|
||||
|
||||
if (!thread.handle) return 0;
|
||||
|
||||
result = WaitForSingleObject(thread.handle, INFINITE);
|
||||
switch (result) {
|
||||
case WAIT_OBJECT_0:
|
||||
if (value_ptr) *value_ptr = thread.arg;
|
||||
return 0;
|
||||
case WAIT_ABANDONED:
|
||||
return EINVAL;
|
||||
default:
|
||||
return GetLastError();
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* ZSTD_MULTITHREAD */
|
123
third/github.com/DataDog/zstd/threading.h
Normal file
@ -0,0 +1,123 @@
|
||||
/**
|
||||
* Copyright (c) 2016 Tino Reichardt
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
*
|
||||
* You can contact the author at:
|
||||
* - zstdmt source repository: https://github.com/mcmilk/zstdmt
|
||||
*/
|
||||
|
||||
#ifndef THREADING_H_938743
|
||||
#define THREADING_H_938743
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
|
||||
|
||||
/**
|
||||
* Windows minimalist Pthread Wrapper, based on :
|
||||
* http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
|
||||
*/
|
||||
#ifdef WINVER
|
||||
# undef WINVER
|
||||
#endif
|
||||
#define WINVER 0x0600
|
||||
|
||||
#ifdef _WIN32_WINNT
|
||||
# undef _WIN32_WINNT
|
||||
#endif
|
||||
#define _WIN32_WINNT 0x0600
|
||||
|
||||
#ifndef WIN32_LEAN_AND_MEAN
|
||||
# define WIN32_LEAN_AND_MEAN
|
||||
#endif
|
||||
|
||||
#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
|
||||
#include <windows.h>
|
||||
#undef ERROR
|
||||
#define ERROR(name) ZSTD_ERROR(name)
|
||||
|
||||
|
||||
/* mutex */
|
||||
#define ZSTD_pthread_mutex_t CRITICAL_SECTION
|
||||
#define ZSTD_pthread_mutex_init(a, b) ((void)(b), InitializeCriticalSection((a)), 0)
|
||||
#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a))
|
||||
#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a))
|
||||
#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a))
|
||||
|
||||
/* condition variable */
|
||||
#define ZSTD_pthread_cond_t CONDITION_VARIABLE
|
||||
#define ZSTD_pthread_cond_init(a, b) ((void)(b), InitializeConditionVariable((a)), 0)
|
||||
#define ZSTD_pthread_cond_destroy(a) ((void)(a))
|
||||
#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE)
|
||||
#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a))
|
||||
#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a))
|
||||
|
||||
/* ZSTD_pthread_create() and ZSTD_pthread_join() */
|
||||
typedef struct {
|
||||
HANDLE handle;
|
||||
void* (*start_routine)(void*);
|
||||
void* arg;
|
||||
} ZSTD_pthread_t;
|
||||
|
||||
int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
|
||||
void* (*start_routine) (void*), void* arg);
|
||||
|
||||
int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
|
||||
|
||||
/**
|
||||
* add here more wrappers as required
|
||||
*/
|
||||
|
||||
|
||||
#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
|
||||
/* === POSIX Systems === */
|
||||
# include <pthread.h>
|
||||
|
||||
#define ZSTD_pthread_mutex_t pthread_mutex_t
|
||||
#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b))
|
||||
#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a))
|
||||
#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a))
|
||||
#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a))
|
||||
|
||||
#define ZSTD_pthread_cond_t pthread_cond_t
|
||||
#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b))
|
||||
#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a))
|
||||
#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b))
|
||||
#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a))
|
||||
#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a))
|
||||
|
||||
#define ZSTD_pthread_t pthread_t
|
||||
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
|
||||
#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
|
||||
|
||||
#else /* ZSTD_MULTITHREAD not defined */
|
||||
/* No multithreading support */
|
||||
|
||||
typedef int ZSTD_pthread_mutex_t;
|
||||
#define ZSTD_pthread_mutex_init(a, b) ((void)(a), (void)(b), 0)
|
||||
#define ZSTD_pthread_mutex_destroy(a) ((void)(a))
|
||||
#define ZSTD_pthread_mutex_lock(a) ((void)(a))
|
||||
#define ZSTD_pthread_mutex_unlock(a) ((void)(a))
|
||||
|
||||
typedef int ZSTD_pthread_cond_t;
|
||||
#define ZSTD_pthread_cond_init(a, b) ((void)(a), (void)(b), 0)
|
||||
#define ZSTD_pthread_cond_destroy(a) ((void)(a))
|
||||
#define ZSTD_pthread_cond_wait(a, b) ((void)(a), (void)(b))
|
||||
#define ZSTD_pthread_cond_signal(a) ((void)(a))
|
||||
#define ZSTD_pthread_cond_broadcast(a) ((void)(a))
|
||||
|
||||
/* do not use ZSTD_pthread_t */
|
||||
|
||||
#endif /* ZSTD_MULTITHREAD */
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* THREADING_H_938743 */
|
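The macros above give zstd one pthread-style locking vocabulary across Windows, POSIX, and single-threaded builds. A hedged sketch of the intended pattern (hypothetical helpers, not vendored code):

```c
#include "threading.h"

static ZSTD_pthread_mutex_t g_lock;
static ZSTD_pthread_cond_t  g_ready;
static int g_flag = 0;

static void demo_init(void)
{
    ZSTD_pthread_mutex_init(&g_lock, NULL);
    ZSTD_pthread_cond_init(&g_ready, NULL);
}

static void demo_wait(void)
{
    ZSTD_pthread_mutex_lock(&g_lock);
    while (!g_flag)   /* classic predicate re-check around cond_wait */
        ZSTD_pthread_cond_wait(&g_ready, &g_lock);
    ZSTD_pthread_mutex_unlock(&g_lock);
}

static void demo_signal(void)
{
    ZSTD_pthread_mutex_lock(&g_lock);
    g_flag = 1;
    ZSTD_pthread_cond_signal(&g_ready);
    ZSTD_pthread_mutex_unlock(&g_lock);
}
```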
18
third/github.com/DataDog/zstd/travis_test_32.sh
Executable file
@ -0,0 +1,18 @@
#!/bin/bash
# Get utilities
yum -y -q -e 0 install wget tar unzip gcc

# Get Go
wget -q https://dl.google.com/go/go1.11.1.linux-386.tar.gz
tar -C /usr/local -xzf go1.11.1.linux-386.tar.gz
export PATH=$PATH:/usr/local/go/bin

# Get payload
wget -q https://github.com/DataDog/zstd/files/2246767/mr.zip
unzip mr.zip

# Build and run tests
cd zstd
go build
PAYLOAD=$(pwd)/mr go test -v
PAYLOAD=$(pwd)/mr go test -bench .
56
third/github.com/DataDog/zstd/update.txt
Normal file
@ -0,0 +1,56 @@
./lib/common/bitstream.h
./lib/common/compiler.h
./lib/compress/zstd_compress_internal.h
./lib/compress/zstd_fast.h
./lib/compress/zstd_double_fast.h
./lib/compress/zstd_lazy.h
./lib/compress/zstd_ldm.h
./lib/dictBuilder/cover.c
./lib/dictBuilder/divsufsort.c
./lib/dictBuilder/divsufsort.h
./lib/common/entropy_common.c
./lib/common/error_private.c
./lib/common/error_private.h
./lib/compress/fse_compress.c
./lib/common/fse_decompress.c
./lib/common/fse.h
./lib/compress/huf_compress.c
./lib/decompress/huf_decompress.c
./lib/common/huf.h
./lib/common/mem.h
./lib/common/pool.c
./lib/common/pool.h
./lib/common/threading.c
./lib/common/threading.h
./lib/common/xxhash.c
./lib/common/xxhash.h
./lib/deprecated/zbuff_common.c
./lib/deprecated/zbuff_compress.c
./lib/deprecated/zbuff_decompress.c
./lib/deprecated/zbuff.h
./lib/dictBuilder/zdict.c
./lib/dictBuilder/zdict.h
./lib/common/zstd_common.c
./lib/compress/zstd_compress.c
./lib/decompress/zstd_decompress.c
./lib/common/zstd_errors.h
./lib/zstd.h
./lib/common/zstd_internal.h
./lib/legacy/zstd_legacy.h
./lib/compress/zstd_opt.c
./lib/compress/zstd_opt.h
./lib/legacy/zstd_v01.c
./lib/legacy/zstd_v01.h
./lib/legacy/zstd_v02.c
./lib/legacy/zstd_v02.h
./lib/legacy/zstd_v03.c
./lib/legacy/zstd_v03.h
./lib/legacy/zstd_v04.c
./lib/legacy/zstd_v04.h
./lib/legacy/zstd_v05.c
./lib/legacy/zstd_v05.h
./lib/legacy/zstd_v06.c
./lib/legacy/zstd_v06.h
./lib/legacy/zstd_v07.c
./lib/legacy/zstd_v07.h
875
third/github.com/DataDog/zstd/xxhash.c
Normal file
@ -0,0 +1,875 @@
/*
 *  xxHash - Fast Hash algorithm
 *  Copyright (C) 2012-2016, Yann Collet
 *
 *  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are
 *  met:
 *
 *  * Redistributions of source code must retain the above copyright
 *  notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above
 *  copyright notice, this list of conditions and the following disclaimer
 *  in the documentation and/or other materials provided with the
 *  distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You can contact the author at :
 *  - xxHash homepage: http://www.xxhash.com
 *  - xxHash source repository : https://github.com/Cyan4973/xxHash
 */


/* *************************************
*  Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (ie, not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on the compiler but violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
 * See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define XXH_FORCE_MEMORY_ACCESS 2
#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif

/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
 * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
 * By default, this option is disabled. To enable it, uncomment the define below :
 */
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */

/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPU.
 * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1,
 * to improve speed for big-endian CPU.
 * This option has no impact on little-endian CPU.
 */
#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
#  define XXH_FORCE_NATIVE_FORMAT 0
#endif

/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
 * The check costs one initial branch per hash; set to 0 when the input data
 * is guaranteed to be aligned.
 */
#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif


/* *************************************
*  Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for malloc(), free() */
#include <stdlib.h>
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
/* for memcpy() */
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }

#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY
#endif
#include "xxhash.h"


/* *************************************
*  Compiler Specific Options
***************************************/
#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR


#ifdef _MSC_VER
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
#endif


/* *************************************
*  Basic Types
***************************************/
#ifndef MEM_MODULE
# define MEM_MODULE
# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint8_t  BYTE;
    typedef uint16_t U16;
    typedef uint32_t U32;
    typedef  int32_t S32;
    typedef uint64_t U64;
#  else
    typedef unsigned char      BYTE;
    typedef unsigned short     U16;
    typedef unsigned int       U32;
    typedef   signed int       S32;
    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
#  endif
#endif


#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;

static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */

static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */


/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif

#if defined(_MSC_VER)   /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#  define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#  define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif


/* *************************************
*  Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
    static const int g_one = 1;
#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
#endif


/* ***************************
*  Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}


/* *************************************
*  Macros
***************************************/
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/* *************************************
*  Constants
***************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 =  668265263U;
static const U32 PRIME32_5 =  374761393U;

static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 =  1609587929392839161ULL;
static const U64 PRIME64_4 =  9650029242287828579ULL;
static const U64 PRIME64_5 =  2870177450012600261ULL;

XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* **************************
*  Utils
****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}


/* ***************************
*  Simple Hash Functions
*****************************/

static U32 XXH32_round(U32 seed, U32 input)
{
    seed += input * PRIME32_2;
    seed  = XXH_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}

FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p<=limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32) len;

    while (p+4<=bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_CREATESTATE_STATIC(state);
    XXH32_reset(state, seed);
    XXH32_update(state, input, len);
    return XXH32_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}


static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}

static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}

FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64) len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_CREATESTATE_STATIC(state);
    XXH64_reset(state, seed);
    XXH64_update(state, input, len);
    return XXH64_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {   /* Input is aligned, let's leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}


/* **************************************************
*  Advanced Hash Functions
****************************************************/

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}


/*** Hash feed ***/

XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len_32 += (unsigned)len;
    state->large_len |= (len>=16) | (state->total_len_32>=16);

    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (unsigned)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* some data left from previous update */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {   const U32* p32 = state->mem32;
            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
        }
        p += 16-state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd-16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do {
            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}


FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    while (p+4<=bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}


/* **** XXH64 **** */

FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {   /* fill in tmp buffer */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* tmp buffer is full */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do {
            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}


FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64 = state->v3 + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}


/* **************************
*  Canonical representation
****************************/

/*! Default XXH result types are basic unsigned 32 and 64 bits.
*   The canonical representation follows human-readable write convention, aka big-endian (large digits first).
*   These functions allow transformation of hash result into and from its canonical format.
*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
*/

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}

XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}
305
third/github.com/DataDog/zstd/xxhash.h
Normal file
@ -0,0 +1,305 @@
/*
   xxHash - Extremely Fast Hash algorithm
   Header File
   Copyright (C) 2012-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

   * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - xxHash source repository : https://github.com/Cyan4973/xxHash
*/

/* Notice extracted from xxHash homepage :

xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing the SMHasher test set.
10 is a perfect score.

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/* ****************************
*  Definitions
******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/* ****************************
*  API modifier
******************************/
/** XXH_PRIVATE_API
*   This is useful if you want to include xxhash functions in `static` mode
*   in order to inline them, and remove their symbol from the public list.
*   Methodology :
*     #define XXH_PRIVATE_API
*     #include "xxhash.h"
*   `xxhash.c` is automatically included.
*   It's not useful to compile and link it as a separate module anymore.
*/
#ifdef XXH_PRIVATE_API
#  ifndef XXH_STATIC_LINKING_ONLY
#    define XXH_STATIC_LINKING_ONLY
#  endif
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
#    define XXH_PUBLIC_API static   /* this version may generate warnings for unused static functions; disable the relevant warning */
#  endif
#else
#  define XXH_PUBLIC_API   /* do nothing */
#endif /* XXH_PRIVATE_API */

/*!XXH_NAMESPACE, aka Namespace Emulation :

If you want to include _and expose_ xxHash functions from within your own library,
but also want to avoid symbol collisions with another library which also includes xxHash,
you can use XXH_NAMESPACE, to automatically prefix any public symbol from the xxhash library
with the value of XXH_NAMESPACE (so avoid keeping it NULL, and avoid numeric values).

Note that no change is required within the calling program as long as it includes `xxhash.h` :
regular symbol names will be automatically translated by this header.
*/
#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
#endif
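As a concrete illustration of the namespace emulation described above (the `mylib_` prefix is a made-up example): building with `cc -DXXH_NAMESPACE=mylib_ -c xxhash.c` renames the exported symbols to `mylib_XXH32`, `mylib_XXH64`, and so on, while callers keep writing the regular names:

```c
#include "xxhash.h"   /* compiled with -DXXH_NAMESPACE=mylib_ on the command line */

unsigned checksum(const void* p, size_t n)
{
    return XXH32(p, n, 0);   /* macro-translated to mylib_XXH32 at link time */
}
```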
/* *************************************
*  Version
***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    6
#define XXH_VERSION_RELEASE  2
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
XXH_PUBLIC_API unsigned XXH_versionNumber (void);


/* ****************************
*  Simple Hash Functions
******************************/
typedef unsigned int       XXH32_hash_t;
typedef unsigned long long XXH64_hash_t;

XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);

/*!
XXH32() :
    Calculate the 32-bit hash of a sequence of "length" bytes stored at memory address "input".
    The memory between input & input+length must be valid (allocated and read-accessible).
    "seed" can be used to alter the result predictably.
    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
XXH64() :
    Calculate the 64-bit hash of a sequence of length "len" stored at memory address "input".
    "seed" can be used to alter the result predictably.
    This function runs 2x faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
*/
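A one-shot usage sketch for the two functions just documented (illustrative only; the resulting hash values are not asserted here):

```c
#include <string.h>
#include "xxhash.h"

static void one_shot_example(void)
{
    const char data[] = "xxhash sample";
    unsigned int       h32 = XXH32(data, strlen(data), 0);   /* seed = 0 */
    unsigned long long h64 = XXH64(data, strlen(data), 0);
    (void)h32; (void)h64;
}
```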
/* ****************************
*  Streaming Hash Functions
******************************/
typedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */

/*! State allocation, compatible with dynamic libraries */

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);


/* hash streaming */

XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/*
These functions generate the xxHash of an input provided in multiple segments.
Note that, for small input, they are slower than single-call functions, due to state management.
For small input, prefer `XXH32()` and `XXH64()` .

XXH state must first be allocated, using XXH*_createState() .

Start a new hash by initializing state with a seed, using XXH*_reset().

Then, feed the hash state by calling XXH*_update() as many times as necessary.
Obviously, input must be allocated and read accessible.
The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.

Finally, a hash value can be produced anytime, by using XXH*_digest().
This function returns the nn-bits hash as an int or long long.

It's still possible to continue inserting input into the hash state after a digest,
and generate some new hashes later on, by calling again XXH*_digest().

When done, free XXH state space if it was allocated dynamically.
*/
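A sketch of the allocate/reset/update/digest cycle just described, hashing input that arrives in chunks (`next_chunk` is a hypothetical producer; error codes are ignored for brevity):

```c
#include "xxhash.h"

static unsigned long long hash_chunks(const void* (*next_chunk)(size_t* len))
{
    XXH64_state_t* const st = XXH64_createState();
    unsigned long long h;
    const void* chunk;
    size_t len;

    XXH64_reset(st, 0);                 /* seed = 0 */
    while ((chunk = next_chunk(&len)) != NULL)
        XXH64_update(st, chunk, len);
    h = XXH64_digest(st);               /* can digest again after more updates */
    XXH64_freeState(st);
    return h;
}
```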
/* **************************
*  Utils
****************************/
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* ! C99 */
#  define restrict   /* disable restrict */
#endif

XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state);


/* **************************
*  Canonical representation
****************************/
/* Default result types for XXH functions are primitive unsigned 32 and 64 bits.
*  The canonical representation uses human-readable write convention, aka big-endian (large digits first).
*  These functions allow transformation of hash result into and from its canonical format.
*  This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
*/
typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
typedef struct { unsigned char digest[8]; } XXH64_canonical_t;

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
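A round-trip sketch for the canonical form (hypothetical helper; return values of the stdio calls are not checked):

```c
#include <stdio.h>
#include "xxhash.h"

static void store_and_load(XXH64_hash_t h, FILE* f)
{
    XXH64_canonical_t c;
    XXH64_canonicalFromHash(&c, h);   /* big-endian bytes on every platform */
    fwrite(&c, sizeof(c), 1, f);

    rewind(f);
    if (fread(&c, sizeof(c), 1, f) == 1) {
        XXH64_hash_t back = XXH64_hashFromCanonical(&c);
        (void)back;                   /* back == h on success */
    }
}
```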
#endif /* XXHASH_H_5627135585666179 */



/* ================================================================================================
   This section contains definitions which are not guaranteed to remain stable.
   They may change in future versions, becoming incompatible with a different version of the library.
   They shall only be used with static linking.
   Never use these definitions in association with dynamic linking !
=================================================================================================== */
#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345)
#define XXH_STATIC_H_3543687687345

/* These definitions are only meant to allow allocation of XXH state
   statically, on stack, or in a struct for example.
   Do not use members directly. */

struct XXH32_state_s {
    unsigned total_len_32;
    unsigned large_len;
    unsigned v1;
    unsigned v2;
    unsigned v3;
    unsigned v4;
    unsigned mem32[4];   /* buffer defined as U32 for alignment */
    unsigned memsize;
    unsigned reserved;   /* never read nor write, will be removed in a future version */
};   /* typedef'd to XXH32_state_t */

struct XXH64_state_s {
    unsigned long long total_len;
    unsigned long long v1;
    unsigned long long v2;
    unsigned long long v3;
    unsigned long long v4;
    unsigned long long mem64[4];   /* buffer defined as U64 for alignment */
    unsigned memsize;
    unsigned reserved[2];          /* never read nor write, will be removed in a future version */
};   /* typedef'd to XXH64_state_t */


#  ifdef XXH_PRIVATE_API
#    include "xxhash.c"   /* include xxhash functions as `static`, for inlining */
#  endif

#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */


#if defined (__cplusplus)
}
#endif
213
third/github.com/DataDog/zstd/zbuff.h
Normal file
@ -0,0 +1,213 @@
/*
|
||||
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
/* ***************************************************************
|
||||
* NOTES/WARNINGS
|
||||
******************************************************************/
|
||||
/* The streaming API defined here is deprecated.
|
||||
* Consider migrating towards ZSTD_compressStream() API in `zstd.h`
|
||||
* See 'lib/README.md'.
|
||||
*****************************************************************/
|
||||
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef ZSTD_BUFFERED_H_23987
|
||||
#define ZSTD_BUFFERED_H_23987
|
||||
|
||||
/* *************************************
|
||||
* Dependencies
|
||||
***************************************/
|
||||
#include <stddef.h> /* size_t */
|
||||
#include "zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */
/* ***************************************************************
 * Compiler specifics
 *****************************************************************/
/* Deprecation warnings */
/* Should these warnings be a problem,
   it is generally possible to disable them,
   typically with -Wno-deprecated-declarations for gcc
   or _CRT_SECURE_NO_WARNINGS in Visual.
   Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */
#ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS
#  define ZBUFF_DEPRECATED(message) ZSTDLIB_API  /* disable deprecation warnings */
#else
#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
#    define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API
#  elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__)
#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message)))
#  elif defined(__GNUC__) && (__GNUC__ >= 3)
#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated))
#  elif defined(_MSC_VER)
#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message))
#  else
#    pragma message("WARNING: You need to implement ZBUFF_DEPRECATED for this compiler")
#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API
#  endif
#endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */


/* *************************************
 * Streaming functions
 ***************************************/
/* This is the easier "buffered" streaming API,
 * using an internal buffer to lift all restrictions on user-provided buffers,
 * which can be any size, any place, for both input and output.
 * ZBUFF and ZSTD are 100% interoperable:
 * frames created by one can be decoded by the other. */

typedef ZSTD_CStream ZBUFF_CCtx;
ZBUFF_DEPRECATED("use ZSTD_createCStream") ZBUFF_CCtx* ZBUFF_createCCtx(void);
ZBUFF_DEPRECATED("use ZSTD_freeCStream")   size_t      ZBUFF_freeCCtx(ZBUFF_CCtx* cctx);

ZBUFF_DEPRECATED("use ZSTD_initCStream")           size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel);
ZBUFF_DEPRECATED("use ZSTD_initCStream_usingDict") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);

ZBUFF_DEPRECATED("use ZSTD_compressStream") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr);
ZBUFF_DEPRECATED("use ZSTD_flushStream")    size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);
ZBUFF_DEPRECATED("use ZSTD_endStream")      size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);

/*-*************************************************
 * Streaming compression - howto
 *
 * A ZBUFF_CCtx object is required to track streaming operation.
 * Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
 * ZBUFF_CCtx objects can be reused multiple times.
 *
 * Start by initializing the ZBUFF_CCtx.
 * Use ZBUFF_compressInit() to start a new compression operation.
 * Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary.
 *
 * Use ZBUFF_compressContinue() repetitively to consume the input stream.
 * *srcSizePtr and *dstCapacityPtr can be any size.
 * The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr.
 * Note that it may not consume the entire input, in which case it's up to the caller to present the remaining data again.
 * The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters, or change `dst`.
 * @return : a hint to the preferred nb of bytes to use as input for the next function call (it's just a hint, to improve latency),
 *           or an error code, which can be tested using ZBUFF_isError().
 *
 * At any moment, it's possible to flush whatever data remains within the buffer, using ZBUFF_compressFlush().
 * The nb of bytes written into `dst` will be reported into *dstCapacityPtr.
 * Note that the function cannot output more than *dstCapacityPtr;
 * therefore, some content might still be left in the internal buffer if *dstCapacityPtr is too small.
 * @return : nb of bytes still present in the internal buffer (0 if it's empty),
 *           or an error code, which can be tested using ZBUFF_isError().
 *
 * ZBUFF_compressEnd() instructs to finish a frame.
 * It will perform a flush and write the frame epilogue.
 * The epilogue is required for decoders to consider a frame completed.
 * Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
 * In which case, call ZBUFF_compressFlush() again to complete the flush.
 * @return : nb of bytes still present in the internal buffer (0 if it's empty),
 *           or an error code, which can be tested using ZBUFF_isError().
 *
 * Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize()
 * input : ZBUFF_recommendedCInSize == 128 KB; the block size is the internal unit, use this value to reduce intermediate stages (better latency)
 * output : ZBUFF_recommendedCOutSize == ZSTD_compressBound(128 KB) + 3 + 3 : ensures it's always possible to write/flush/end a full block. Skips some buffering.
 * By using both, it ensures that input will be entirely consumed, and output will always contain the result, reducing intermediate buffering.
 * **************************************************/
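
/* Example (illustrative sketch, not part of the original header) :
 * a minimal compression loop over this deprecated buffered API.
 * readInput() and writeOutput() are hypothetical user-supplied I/O helpers.
 *
 *     ZBUFF_CCtx* const zbc = ZBUFF_createCCtx();
 *     size_t const inSize  = ZBUFF_recommendedCInSize();
 *     size_t const outSize = ZBUFF_recommendedCOutSize();
 *     char* const inBuff   = (char*)malloc(inSize);
 *     char* const outBuff  = (char*)malloc(outSize);
 *     size_t readSize;
 *     ZBUFF_compressInit(zbc, 5);
 *     while ((readSize = readInput(inBuff, inSize)) > 0) {
 *         size_t pos = 0;
 *         while (pos < readSize) {       // input may not be fully consumed in one call
 *             size_t srcSize = readSize - pos;
 *             size_t dstCapacity = outSize;
 *             size_t const hint = ZBUFF_compressContinue(zbc, outBuff, &dstCapacity, inBuff + pos, &srcSize);
 *             if (ZBUFF_isError(hint)) return hint;
 *             writeOutput(outBuff, dstCapacity);   // dstCapacity now holds nb of bytes written
 *             pos += srcSize;                      // srcSize now holds nb of bytes consumed
 *         }
 *     }
 *     {   size_t dstCapacity = outSize;
 *         size_t const remaining = ZBUFF_compressEnd(zbc, outBuff, &dstCapacity);  // flush + epilogue
 *         if (ZBUFF_isError(remaining)) return remaining;
 *         writeOutput(outBuff, dstCapacity);   // remaining==0 when dst is at least the recommended size
 *     }
 *     ZBUFF_freeCCtx(zbc);
 */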

typedef ZSTD_DStream ZBUFF_DCtx;
ZBUFF_DEPRECATED("use ZSTD_createDStream") ZBUFF_DCtx* ZBUFF_createDCtx(void);
ZBUFF_DEPRECATED("use ZSTD_freeDStream")   size_t      ZBUFF_freeDCtx(ZBUFF_DCtx* dctx);

ZBUFF_DEPRECATED("use ZSTD_initDStream")           size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx);
ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize);

ZBUFF_DEPRECATED("use ZSTD_decompressStream") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx,
                                                                              void* dst, size_t* dstCapacityPtr,
                                                                              const void* src, size_t* srcSizePtr);

/*-***************************************************************************
 * Streaming decompression - howto
 *
 * A ZBUFF_DCtx object is required to track streaming operations.
 * Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
 * Use ZBUFF_decompressInit() to start a new decompression operation,
 * or ZBUFF_decompressInitDictionary() if decompression requires a dictionary.
 * Note that ZBUFF_DCtx objects can be re-initialized multiple times.
 *
 * Use ZBUFF_decompressContinue() repetitively to consume your input.
 * *srcSizePtr and *dstCapacityPtr can be any size.
 * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
 * Note that it may not consume the entire input, in which case it's up to the caller to present the remaining input again.
 * The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
 * @return : 0 when a frame is completely decoded and fully flushed,
 *           1 when there is still some data left within the internal buffer to flush,
 *           >1 when more data is expected, with the value being a suggested next input size (it's just a hint, which helps latency),
 *           or an error code, which can be tested using ZBUFF_isError().
 *
 * Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize()
 * output : ZBUFF_recommendedDOutSize == 128 KB; the block size is the internal unit, it ensures it's always possible to write a full block when decoded.
 * input : ZBUFF_recommendedDInSize == 128 KB + 3;
 *         just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
 * *******************************************************************************/
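
/* Example (illustrative sketch, not part of the original header) :
 * a minimal decompression loop; readInput()/writeOutput() are the same
 * hypothetical I/O helpers as in the compression sketch above.
 *
 *     ZBUFF_DCtx* const zbd = ZBUFF_createDCtx();
 *     size_t const inSize  = ZBUFF_recommendedDInSize();
 *     size_t const outSize = ZBUFF_recommendedDOutSize();
 *     char* const inBuff   = (char*)malloc(inSize);
 *     char* const outBuff  = (char*)malloc(outSize);
 *     size_t result = 1;   // "more data expected" until a frame completes
 *     size_t readSize;
 *     ZBUFF_decompressInit(zbd);
 *     while (result != 0 && (readSize = readInput(inBuff, inSize)) > 0) {
 *         size_t pos = 0;
 *         while (pos < readSize) {
 *             size_t srcSize = readSize - pos;
 *             size_t dstCapacity = outSize;
 *             result = ZBUFF_decompressContinue(zbd, outBuff, &dstCapacity, inBuff + pos, &srcSize);
 *             if (ZBUFF_isError(result)) return result;
 *             writeOutput(outBuff, dstCapacity);   // nb of bytes actually decoded
 *             pos += srcSize;                      // nb of bytes actually consumed
 *         }
 *     }
 *     ZBUFF_freeDCtx(zbd);   // result == 0 means the frame was fully decoded and flushed
 */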

/* *************************************
 * Tool functions
 ***************************************/
ZBUFF_DEPRECATED("use ZSTD_isError")      unsigned    ZBUFF_isError(size_t errorCode);
ZBUFF_DEPRECATED("use ZSTD_getErrorName") const char* ZBUFF_getErrorName(size_t errorCode);

/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
 *  These sizes are just hints, they tend to offer better latency */
ZBUFF_DEPRECATED("use ZSTD_CStreamInSize")  size_t ZBUFF_recommendedCInSize(void);
ZBUFF_DEPRECATED("use ZSTD_CStreamOutSize") size_t ZBUFF_recommendedCOutSize(void);
ZBUFF_DEPRECATED("use ZSTD_DStreamInSize")  size_t ZBUFF_recommendedDInSize(void);
ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(void);

#endif  /* ZSTD_BUFFERED_H_23987 */


#ifdef ZBUFF_STATIC_LINKING_ONLY
#ifndef ZBUFF_STATIC_H_30298098432
#define ZBUFF_STATIC_H_30298098432

/* ====================================================================================
 * The definitions in this section are considered experimental.
 * They should never be used in association with a dynamic library, as they may change in the future.
 * They are provided for advanced usages.
 * Use them only in association with static linking.
 * ==================================================================================== */

/*--- Dependency ---*/
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters, ZSTD_customMem */
#include "zstd.h"


/*--- Custom memory allocator ---*/
/*! ZBUFF_createCCtx_advanced() :
 *  Create a ZBUFF compression context using external alloc and free functions */
ZBUFF_DEPRECATED("use ZSTD_createCStream_advanced") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem);

/*! ZBUFF_createDCtx_advanced() :
 *  Create a ZBUFF decompression context using external alloc and free functions */
ZBUFF_DEPRECATED("use ZSTD_createDStream_advanced") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem);


/*--- Advanced Streaming Initialization ---*/
ZBUFF_DEPRECATED("use ZSTD_initCStream_advanced") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
                                                                                     const void* dict, size_t dictSize,
                                                                                     ZSTD_parameters params, unsigned long long pledgedSrcSize);


#endif /* ZBUFF_STATIC_H_30298098432 */
#endif /* ZBUFF_STATIC_LINKING_ONLY */


#if defined (__cplusplus)
}
#endif
26
third/github.com/DataDog/zstd/zbuff_common.c
Normal file
@@ -0,0 +1,26 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
 * Dependencies
 ***************************************/
#include "error_private.h"
#include "zbuff.h"

/*-****************************************
 * ZBUFF Error Management (deprecated)
 ******************************************/

/*! ZBUFF_isError() :
 *  tells if a return value is an error code */
unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); }
/*! ZBUFF_getErrorName() :
 *  provides error code string from function result (useful for debugging) */
const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
147
third/github.com/DataDog/zstd/zbuff_compress.c
Normal file
@@ -0,0 +1,147 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */



/* *************************************
 * Dependencies
 ***************************************/
#define ZBUFF_STATIC_LINKING_ONLY
#include "zbuff.h"


/*-***********************************************************
 * Streaming compression
 *
 * A ZBUFF_CCtx object is required to track streaming operation.
 * Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
 * Use ZBUFF_compressInit() to start a new compression operation.
 * ZBUFF_CCtx objects can be reused multiple times.
 *
 * Use ZBUFF_compressContinue() repetitively to consume your input.
 * *srcSizePtr and *dstCapacityPtr can be any size.
 * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
 * Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
 * The content of dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change dst.
 * @return : a hint to the preferred nb of bytes to use as input for the next function call (it's only a hint, to improve latency),
 *           or an error code, which can be tested using ZBUFF_isError().
 *
 * ZBUFF_compressFlush() can be used to instruct ZBUFF to compress and output whatever remains within its buffer.
 * Note that it will not output more than *dstCapacityPtr.
 * Therefore, some content might still be left in its internal buffer if the dst buffer is too small.
 * @return : nb of bytes still present in the internal buffer (0 if it's empty),
 *           or an error code, which can be tested using ZBUFF_isError().
 *
 * ZBUFF_compressEnd() instructs to finish a frame.
 * It will perform a flush and write the frame epilogue.
 * Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
 * @return : nb of bytes still present in the internal buffer (0 if it's empty),
 *           or an error code, which can be tested using ZBUFF_isError().
 *
 * Hint : recommended buffer sizes (not compulsory)
 * input : ZSTD_BLOCKSIZE_MAX (128 KB), the internal unit size; it improves latency to use this value.
 * output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed.
 * ***********************************************************/

ZBUFF_CCtx* ZBUFF_createCCtx(void)
{
    return ZSTD_createCStream();
}

ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem)
{
    return ZSTD_createCStream_advanced(customMem);
}

size_t ZBUFF_freeCCtx(ZBUFF_CCtx* zbc)
{
    return ZSTD_freeCStream(zbc);
}


/* ====== Initialization ====== */

size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
                                   const void* dict, size_t dictSize,
                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* preserve "0 == unknown" behavior */
    return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize);
}


size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel)
{
    return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel);
}

size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel)
{
    return ZSTD_initCStream(zbc, compressionLevel);
}

/* ====== Compression ====== */


size_t ZBUFF_compressContinue(ZBUFF_CCtx* zbc,
                              void* dst, size_t* dstCapacityPtr,
                              const void* src, size_t* srcSizePtr)
{
    size_t result;
    ZSTD_outBuffer outBuff;
    ZSTD_inBuffer inBuff;
    outBuff.dst = dst;
    outBuff.pos = 0;
    outBuff.size = *dstCapacityPtr;
    inBuff.src = src;
    inBuff.pos = 0;
    inBuff.size = *srcSizePtr;
    result = ZSTD_compressStream(zbc, &outBuff, &inBuff);
    *dstCapacityPtr = outBuff.pos;
    *srcSizePtr = inBuff.pos;
    return result;
}



/* ====== Finalize ====== */

size_t ZBUFF_compressFlush(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
{
    size_t result;
    ZSTD_outBuffer outBuff;
    outBuff.dst = dst;
    outBuff.pos = 0;
    outBuff.size = *dstCapacityPtr;
    result = ZSTD_flushStream(zbc, &outBuff);
    *dstCapacityPtr = outBuff.pos;
    return result;
}


size_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
{
    size_t result;
    ZSTD_outBuffer outBuff;
    outBuff.dst = dst;
    outBuff.pos = 0;
    outBuff.size = *dstCapacityPtr;
    result = ZSTD_endStream(zbc, &outBuff);
    *dstCapacityPtr = outBuff.pos;
    return result;
}



/* *************************************
 * Tool functions
 ***************************************/
size_t ZBUFF_recommendedCInSize(void)  { return ZSTD_CStreamInSize(); }
size_t ZBUFF_recommendedCOutSize(void) { return ZSTD_CStreamOutSize(); }
75
third/github.com/DataDog/zstd/zbuff_decompress.c
Normal file
@@ -0,0 +1,75 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */



/* *************************************
 * Dependencies
 ***************************************/
#define ZBUFF_STATIC_LINKING_ONLY
#include "zbuff.h"


ZBUFF_DCtx* ZBUFF_createDCtx(void)
{
    return ZSTD_createDStream();
}

ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem)
{
    return ZSTD_createDStream_advanced(customMem);
}

size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbd)
{
    return ZSTD_freeDStream(zbd);
}


/* *** Initialization *** */

size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* zbd, const void* dict, size_t dictSize)
{
    return ZSTD_initDStream_usingDict(zbd, dict, dictSize);
}

size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbd)
{
    return ZSTD_initDStream(zbd);
}


/* *** Decompression *** */

size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbd,
                                void* dst, size_t* dstCapacityPtr,
                                const void* src, size_t* srcSizePtr)
{
    ZSTD_outBuffer outBuff;
    ZSTD_inBuffer inBuff;
    size_t result;
    outBuff.dst = dst;
    outBuff.pos = 0;
    outBuff.size = *dstCapacityPtr;
    inBuff.src = src;
    inBuff.pos = 0;
    inBuff.size = *srcSizePtr;
    result = ZSTD_decompressStream(zbd, &outBuff, &inBuff);
    *dstCapacityPtr = outBuff.pos;
    *srcSizePtr = inBuff.pos;
    return result;
}


/* *************************************
 * Tool functions
 ***************************************/
size_t ZBUFF_recommendedDInSize(void)  { return ZSTD_DStreamInSize(); }
size_t ZBUFF_recommendedDOutSize(void) { return ZSTD_DStreamOutSize(); }
1108
third/github.com/DataDog/zstd/zdict.c
Normal file
File diff suppressed because it is too large
212
third/github.com/DataDog/zstd/zdict.h
Normal file
@@ -0,0 +1,212 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef DICTBUILDER_H_001
#define DICTBUILDER_H_001

#if defined (__cplusplus)
extern "C" {
#endif


/*====== Dependencies ======*/
#include <stddef.h>  /* size_t */


/* ===== ZDICTLIB_API : control library symbols visibility ===== */
#ifndef ZDICTLIB_VISIBILITY
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
#  else
#    define ZDICTLIB_VISIBILITY
#  endif
#endif
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
#  define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
#  define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required, but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
#else
#  define ZDICTLIB_API ZDICTLIB_VISIBILITY
#endif


/*! ZDICT_trainFromBuffer():
 *  Train a dictionary from an array of samples.
 *  Redirects towards ZDICT_optimizeTrainFromBuffer_cover() single-threaded, with d=8 and steps=4.
 *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 *  The resulting dictionary will be saved into `dictBuffer`.
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *  Note: ZDICT_trainFromBuffer() requires about 9 bytes of memory for each input byte.
 *  Tips: In general, a reasonable dictionary has a size of ~100 KB.
 *        It's possible to select a smaller or larger size, just by specifying `dictBufferCapacity`.
 *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
 *        It's recommended that the total size of all samples be about ~100x the target size of the dictionary.
 */
ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
                                          const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
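
/* Example (illustrative sketch, not part of the original header) :
 * training a dictionary from nbSamples in-memory samples.
 * samples[i] / sampleLens[i] are hypothetical inputs already loaded by the caller.
 *
 *     size_t total = 0;
 *     for (unsigned i = 0; i < nbSamples; i++) total += sampleLens[i];
 *     char*   const samplesBuffer = (char*)malloc(total);
 *     size_t* const samplesSizes  = (size_t*)malloc(nbSamples * sizeof(size_t));
 *     size_t offset = 0;
 *     for (unsigned i = 0; i < nbSamples; i++) {   // concatenate samples into one flat buffer
 *         memcpy(samplesBuffer + offset, samples[i], sampleLens[i]);
 *         samplesSizes[i] = sampleLens[i];
 *         offset += sampleLens[i];
 *     }
 *     size_t const dictCapacity = 100 * 1024;      // ~100 KB, per the tip above
 *     void*  const dictBuffer   = malloc(dictCapacity);
 *     size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
 *                                                   samplesBuffer, samplesSizes, nbSamples);
 *     // on error: ZDICT_isError(dictSize) != 0; name via ZDICT_getErrorName(dictSize)
 */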

/*====== Helper functions ======*/
ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize);  /**< extracts dictID; @return zero if error (not a valid dictionary) */
ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);
ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);



#ifdef ZDICT_STATIC_LINKING_ONLY

/* ====================================================================================
 * The definitions in this section are considered experimental.
 * They should never be used with a dynamic library, as they may change in the future.
 * They are provided for advanced usages.
 * Use them only in association with static linking.
 * ==================================================================================== */

typedef struct {
    int      compressionLevel;   /* optimize for a specific zstd compression level; 0 means default */
    unsigned notificationLevel;  /* Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
    unsigned dictID;             /* force dictID value; 0 means auto mode (32-bits random value) */
} ZDICT_params_t;

/*! ZDICT_cover_params_t:
 *  k and d are the only required parameters.
 *  For others, value 0 means default.
 */
typedef struct {
    unsigned k;          /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
    unsigned d;          /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
    unsigned steps;      /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */
    unsigned nbThreads;  /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
    ZDICT_params_t zParams;
} ZDICT_cover_params_t;


/*! ZDICT_trainFromBuffer_cover():
 *  Train a dictionary from an array of samples using the COVER algorithm.
 *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 *  The resulting dictionary will be saved into `dictBuffer`.
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *  Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
 *  Tips: In general, a reasonable dictionary has a size of ~100 KB.
 *        It's possible to select a smaller or larger size, just by specifying `dictBufferCapacity`.
 *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
 *        It's recommended that the total size of all samples be about ~100x the target size of the dictionary.
 */
ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity,
    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t parameters);

/*! ZDICT_optimizeTrainFromBuffer_cover():
 *  The same requirements as above hold for all the parameters except `parameters`.
 *  This function tries many parameter combinations and picks the best parameters.
 *  `*parameters` is filled with the best parameters found,
 *  and the dictionary constructed with those parameters is stored in `dictBuffer`.
 *
 *  All of the parameters d, k, steps are optional.
 *  If d is non-zero then we don't check multiple values of d; otherwise we check d = {6, 8, 10, 12, 14, 16}.
 *  If steps is zero, the default (32) is used.
 *  If k is non-zero then we don't check multiple values of k; otherwise we check `steps` values of k in [16, 2048].
 *
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *          On success `*parameters` contains the parameters selected.
 *  Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte, plus about another 5 bytes per input byte for each thread.
 */
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
    void* dictBuffer, size_t dictBufferCapacity,
    const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t* parameters);
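
/* Example (illustrative sketch, not part of the original header) :
 * letting the optimizer pick k and d, reusing the buffers from the
 * ZDICT_trainFromBuffer() sketch above.
 *
 *     ZDICT_cover_params_t params;
 *     memset(&params, 0, sizeof(params));   // k=0, d=0, steps=0 => search with defaults
 *     params.nbThreads = 1;
 *     size_t const dictSize = ZDICT_optimizeTrainFromBuffer_cover(
 *                                 dictBuffer, dictCapacity,
 *                                 samplesBuffer, samplesSizes, nbSamples,
 *                                 &params);
 *     if (!ZDICT_isError(dictSize)) {
 *         // params.k and params.d now hold the selected values
 *     }
 */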

/*! ZDICT_finalizeDictionary():
 *  Given a custom content as a basis for dictionary, and a set of samples,
 *  finalize the dictionary by adding headers and statistics.
 *
 *  Samples must be stored concatenated in a flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
 *
 *  dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
 *  maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
 *
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
 *          or an error code, which can be tested by ZDICT_isError().
 *  Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
 *  Note 2: dictBuffer and dictContent can overlap.
 */
#define ZDICT_CONTENTSIZE_MIN 128
#define ZDICT_DICTSIZE_MIN    256
ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
                                             const void* dictContent, size_t dictContentSize,
                                             const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                             ZDICT_params_t parameters);

typedef struct {
    unsigned selectivityLevel;  /* 0 means default; larger => select more => larger dictionary */
    ZDICT_params_t zParams;
} ZDICT_legacy_params_t;

/*! ZDICT_trainFromBuffer_legacy():
 *  Train a dictionary from an array of samples.
 *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
 *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
 *  The resulting dictionary will be saved into `dictBuffer`.
 *  `parameters` is optional and can be provided with values set to 0 to mean "default".
 * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
 *          or an error code, which can be tested with ZDICT_isError().
 *  Tips: In general, a reasonable dictionary has a size of ~100 KB.
 *        It's possible to select a smaller or larger size, just by specifying `dictBufferCapacity`.
 *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
 *        It's recommended that the total size of all samples be about ~100x the target size of the dictionary.
 *  Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
 */
ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
    void *dictBuffer, size_t dictBufferCapacity,
    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_legacy_params_t parameters);

/* Deprecation warnings */
/* It is generally possible to disable deprecation warnings from the compiler,
   for example with -Wno-deprecated-declarations for gcc
   or _CRT_SECURE_NO_WARNINGS in Visual.
   Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */
#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
#  define ZDICT_DEPRECATED(message) ZDICTLIB_API  /* disable deprecation warnings */
#else
#  define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
#    define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API
#  elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__)
#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
#  elif (ZDICT_GCC_VERSION >= 301)
#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
#  elif defined(_MSC_VER)
#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))
#  else
#    pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
#    define ZDICT_DEPRECATED(message) ZDICTLIB_API
#  endif
#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */

ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
                                        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);


#endif /* ZDICT_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
}
#endif

#endif /* DICTBUILDER_H_001 */
144
third/github.com/DataDog/zstd/zstd.go
Normal file
@@ -0,0 +1,144 @@
package zstd

/*
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#include "stdint.h"  // for uintptr_t

// The following *_wrapper functions are used to avoid superfluous
// memory allocations when calling the wrapped functions from Go code.
// See https://github.com/golang/go/issues/24450 for details.

static size_t ZSTD_compress_wrapper(uintptr_t dst, size_t maxDstSize, const uintptr_t src, size_t srcSize, int compressionLevel) {
	return ZSTD_compress((void*)dst, maxDstSize, (const void*)src, srcSize, compressionLevel);
}

static size_t ZSTD_decompress_wrapper(uintptr_t dst, size_t maxDstSize, uintptr_t src, size_t srcSize) {
	return ZSTD_decompress((void*)dst, maxDstSize, (const void *)src, srcSize);
}

*/
import "C"
import (
	"bytes"
	"errors"
	"io/ioutil"
	"unsafe"
)

// Defines best and standard values for the zstd cli
const (
	BestSpeed          = 1
	BestCompression    = 20
	DefaultCompression = 5
)

var (
	// ErrEmptySlice is returned when there is nothing to compress
	ErrEmptySlice = errors.New("Bytes slice is empty")
)

// CompressBound returns the worst-case size needed for a destination buffer,
// which can be used to preallocate a destination buffer or select a previously
// allocated buffer from a pool.
// See zstd.h to mirror the implementation of ZSTD_COMPRESSBOUND.
func CompressBound(srcSize int) int {
	lowLimit := 128 << 10 // 128 kB
	var margin int
	if srcSize < lowLimit {
		margin = (lowLimit - srcSize) >> 11
	}
	return srcSize + (srcSize >> 8) + margin
}
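
// Worked example (illustrative, not part of the original source):
// for srcSize = 1000 (< 128 KB), margin = (131072-1000)>>11 = 63 and
// srcSize>>8 = 3, so CompressBound(1000) = 1000 + 3 + 63 = 1066,
// matching ZSTD_COMPRESSBOUND in zstd.h for the same input.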

// cCompressBound is a cgo call to check the go implementation above against the c code.
func cCompressBound(srcSize int) int {
	return int(C.ZSTD_compressBound(C.size_t(srcSize)))
}

// Compress src into dst. If you have a buffer to use, you can pass it to
// prevent allocation. If it is too small, or if nil is passed, a new buffer
// will be allocated and returned.
func Compress(dst, src []byte) ([]byte, error) {
	return CompressLevel(dst, src, DefaultCompression)
}

// CompressLevel is the same as Compress but you can pass a compression level.
func CompressLevel(dst, src []byte, level int) ([]byte, error) {
	bound := CompressBound(len(src))
	if cap(dst) >= bound {
		dst = dst[0:bound] // Reuse dst buffer
	} else {
		dst = make([]byte, bound)
	}

	srcPtr := C.uintptr_t(uintptr(0)) // Do not point anywhere, if src is empty
	if len(src) > 0 {
		srcPtr = C.uintptr_t(uintptr(unsafe.Pointer(&src[0])))
	}

	cWritten := C.ZSTD_compress_wrapper(
		C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
		C.size_t(len(dst)),
		srcPtr,
		C.size_t(len(src)),
		C.int(level))

	written := int(cWritten)
	// Check if the return is an Error code
	if err := getError(written); err != nil {
		return nil, err
	}
	return dst[:written], nil
}

// Decompress src into dst. If you have a buffer to use, you can pass it to
// prevent allocation. If it is too small, or if nil is passed, a new buffer
// will be allocated and returned.
func Decompress(dst, src []byte) ([]byte, error) {
	if len(src) == 0 {
		return []byte{}, ErrEmptySlice
	}
	decompress := func(dst, src []byte) ([]byte, error) {

		cWritten := C.ZSTD_decompress_wrapper(
			C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
			C.size_t(len(dst)),
			C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))),
			C.size_t(len(src)))

		written := int(cWritten)
		// Check error
		if err := getError(written); err != nil {
			return nil, err
		}
		return dst[:written], nil
	}

	if len(dst) == 0 {
		// Attempt to use zstd to determine the decompressed size (may result in error or 0)
		size := int(C.size_t(C.ZSTD_getDecompressedSize(unsafe.Pointer(&src[0]), C.size_t(len(src)))))

		if err := getError(size); err != nil {
			return nil, err
		}

		if size > 0 {
			dst = make([]byte, size)
		} else {
			dst = make([]byte, len(src)*3) // starting guess
		}
	}
	for i := 0; i < 3; i++ { // 3 tries to allocate a bigger buffer
		result, err := decompress(dst, src)
		if !IsDstSizeTooSmallError(err) {
			return result, err
		}
		dst = make([]byte, len(dst)*2) // Grow buffer by 2x
	}

	// We failed to get a dst buffer of the correct size, use the stream API
	r := NewReader(bytes.NewReader(src))
	defer r.Close()
	return ioutil.ReadAll(r)
}
1399
third/github.com/DataDog/zstd/zstd.h
Normal file
File diff suppressed because it is too large
86
third/github.com/DataDog/zstd/zstd_common.c
Normal file
@@ -0,0 +1,86 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */



/*-*************************************
 * Dependencies
 ***************************************/
#include <stdlib.h>   /* malloc, calloc, free */
#include <string.h>   /* memset */
#include "error_private.h"
#include "zstd_internal.h"


/*-****************************************
 * Version
 ******************************************/
unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }

const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }


/*-****************************************
 * ZSTD Error Management
 ******************************************/
/*! ZSTD_isError() :
 *  tells if a return value is an error code */
unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }

/*! ZSTD_getErrorName() :
 *  provides error code string from function result (useful for debugging) */
const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }

/*! ZSTD_getErrorCode() :
 *  convert a `size_t` function result into a proper ZSTD_errorCode enum */
ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }

/*! ZSTD_getErrorString() :
 *  provides error code string from enum */
const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }

/*! g_debuglog_enable :
 *  turn on/off debug traces (global switch) */
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 2)
int g_debuglog_enable = 1;
#endif


/*=**************************************************************
 * Custom allocator
 ****************************************************************/
void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
{
    if (customMem.customAlloc)
        return customMem.customAlloc(customMem.opaque, size);
    return malloc(size);
}

void* ZSTD_calloc(size_t size, ZSTD_customMem customMem)
{
    if (customMem.customAlloc) {
        /* calloc implemented as malloc+memset;
         * not as efficient as calloc, but next best guess for custom malloc */
        void* const ptr = customMem.customAlloc(customMem.opaque, size);
        memset(ptr, 0, size);
        return ptr;
    }
    return calloc(1, size);
}

void ZSTD_free(void* ptr, ZSTD_customMem customMem)
{
    if (ptr!=NULL) {
        if (customMem.customFree)
            customMem.customFree(customMem.opaque, ptr);
        else
            free(ptr);
    }
}
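
/* Example (illustrative sketch, not part of the original source) :
 * routing every internal allocation through a counting allocator via
 * ZSTD_customMem. trackingAlloc/trackingFree are hypothetical user functions.
 *
 *     static void* trackingAlloc(void* opaque, size_t size)
 *     {
 *         *(size_t*)opaque += size;   // hypothetical byte counter
 *         return malloc(size);
 *     }
 *     static void trackingFree(void* opaque, void* address)
 *     {
 *         (void)opaque;
 *         free(address);
 *     }
 *
 *     static size_t totalAllocated = 0;
 *     ZSTD_customMem const cmem = { trackingAlloc, trackingFree, &totalAllocated };
 *     ZSTD_CStream* const zcs = ZSTD_createCStream_advanced(cmem);
 *     // ZSTD_malloc()/ZSTD_calloc()/ZSTD_free() above now dispatch to the callbacks
 */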
3449
third/github.com/DataDog/zstd/zstd_compress.c
Normal file
File diff suppressed because it is too large
709
third/github.com/DataDog/zstd/zstd_compress_internal.h
Normal file
@@ -0,0 +1,709 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* This header contains definitions
 * that shall **only** be used by modules within lib/compress.
 */

#ifndef ZSTD_COMPRESS_H
#define ZSTD_COMPRESS_H

/*-*************************************
 * Dependencies
 ***************************************/
#include "zstd_internal.h"
#ifdef ZSTD_MULTITHREAD
#  include "zstdmt_compress.h"
#endif

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
 * Constants
 ***************************************/
#define kSearchStrength      8
#define HASH_READ_SIZE       8
#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index 1 now means "unsorted".
                                       It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
                                       It's not a big deal though : the candidate will just be sorted again.
                                       Additionally, candidate position 1 will be lost.
                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy */


/*-*************************************
 * Context memory management
 ***************************************/
typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;

typedef struct ZSTD_prefixDict_s {
    const void* dict;
    size_t dictSize;
    ZSTD_dictContentType_e dictContentType;
} ZSTD_prefixDict;

typedef struct {
    U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
    HUF_repeat hufCTable_repeatMode;
    FSE_repeat offcode_repeatMode;
    FSE_repeat matchlength_repeatMode;
    FSE_repeat litlength_repeatMode;
} ZSTD_entropyCTables_t;

typedef struct {
    U32 off;
    U32 len;
} ZSTD_match_t;

typedef struct {
    int price;
    U32 off;
    U32 mlen;
    U32 litlen;
    U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;

typedef struct {
    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
    U32* litFreq;               /* table of literals statistics, of size 256 */
    U32* litLengthFreq;         /* table of litLength statistics, of size (MaxLL+1) */
    U32* matchLengthFreq;       /* table of matchLength statistics, of size (MaxML+1) */
    U32* offCodeFreq;           /* table of offCode statistics, of size (MaxOff+1) */
    ZSTD_match_t* matchTable;   /* list of found matches, of size ZSTD_OPT_NUM+1 */
    ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */

    U32 litSum;                 /* nb of literals */
    U32 litLengthSum;           /* nb of litLength codes */
    U32 matchLengthSum;         /* nb of matchLength codes */
    U32 offCodeSum;             /* nb of offset codes */
    /* begin : updated by ZSTD_setLog2Prices */
    U32 log2litSum;             /* pow2 to compare log2(litfreq) to */
    U32 log2litLengthSum;       /* pow2 to compare log2(llfreq) to */
    U32 log2matchLengthSum;     /* pow2 to compare log2(mlfreq) to */
    U32 log2offCodeSum;         /* pow2 to compare log2(offreq) to */
    /* end : updated by ZSTD_setLog2Prices */
    U32 staticPrices;           /* prices follow a pre-defined cost structure, statistics are irrelevant */
} optState_t;

typedef struct {
    ZSTD_entropyCTables_t entropy;
    U32 rep[ZSTD_REP_NUM];
} ZSTD_compressedBlockState_t;

typedef struct {
    BYTE const* nextSrc;    /* next block here to continue on current prefix */
    BYTE const* base;       /* All regular indexes relative to this position */
    BYTE const* dictBase;   /* extDict indexes relative to this position */
    U32 dictLimit;          /* below that point, need extDict */
    U32 lowLimit;           /* below that point, no more data */
} ZSTD_window_t;

typedef struct {
    ZSTD_window_t window;   /* State for window round buffer management */
    U32 loadedDictEnd;      /* index of end of dictionary */
    U32 nextToUpdate;       /* index from which to continue table update */
    U32 nextToUpdate3;      /* index from which to continue table update */
    U32 hashLog3;           /* dispatch table : larger == faster, more memory */
    U32* hashTable;
    U32* hashTable3;
    U32* chainTable;
    optState_t opt;         /* optimal parser state */
} ZSTD_matchState_t;

typedef struct {
    ZSTD_compressedBlockState_t* prevCBlock;
    ZSTD_compressedBlockState_t* nextCBlock;
    ZSTD_matchState_t matchState;
} ZSTD_blockState_t;

typedef struct {
    U32 offset;
    U32 checksum;
} ldmEntry_t;

typedef struct {
    ZSTD_window_t window;   /* State for the window round buffer management */
    ldmEntry_t* hashTable;
    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
    U64 hashPower;          /* Used to compute the rolling hash.
                             * Depends on ldmParams.minMatchLength */
} ldmState_t;

typedef struct {
    U32 enableLdm;        /* 1 if enable long distance matching */
    U32 hashLog;          /* Log size of hashTable */
    U32 bucketSizeLog;    /* Log bucket size for collision resolution, at most 8 */
    U32 minMatchLength;   /* Minimum match length */
    U32 hashEveryLog;     /* Log number of entries to skip */
    U32 windowLog;        /* Window log for the LDM */
} ldmParams_t;

typedef struct {
    U32 offset;
    U32 litLength;
    U32 matchLength;
} rawSeq;

typedef struct {
    rawSeq* seq;      /* The start of the sequences */
    size_t pos;       /* The position where reading stopped. <= size. */
    size_t size;      /* The number of sequences. <= capacity. */
    size_t capacity;  /* The capacity of the `seq` pointer */
} rawSeqStore_t;

struct ZSTD_CCtx_params_s {
    ZSTD_format_e format;
    ZSTD_compressionParameters cParams;
    ZSTD_frameParameters fParams;

    int compressionLevel;
    int disableLiteralCompression;
    int forceWindow;           /* force back-references to respect limit of
                                * 1<<wLog, even for dictionary */

    /* Multithreading: used to pass parameters to mtctx */
    unsigned nbWorkers;
    unsigned jobSize;
    unsigned overlapSizeLog;

    /* Long distance matching parameters */
    ldmParams_t ldmParams;

    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
    ZSTD_customMem customMem;
};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */

struct ZSTD_CCtx_s {
    ZSTD_compressionStage_e stage;
    int cParamsChanged;    /* == 1 if cParams (except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available), then reset to 0. */
    int bmi2;              /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
    ZSTD_CCtx_params requestedParams;
    ZSTD_CCtx_params appliedParams;
    U32 dictID;
    void* workSpace;
    size_t workSpaceSize;
    size_t blockSize;
    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
    unsigned long long consumedSrcSize;
    unsigned long long producedCSize;
    XXH64_state_t xxhState;
    ZSTD_customMem customMem;
    size_t staticSize;

    seqStore_t seqStore;           /* sequences storage ptrs */
    ldmState_t ldmState;           /* long distance matching state */
    rawSeq* ldmSequences;          /* Storage for the ldm output sequences */
    size_t maxNbLdmSequences;
    rawSeqStore_t externSeqStore;  /* Mutable reference to external sequences */
    ZSTD_blockState_t blockState;
    U32* entropyWorkspace;         /* entropy workspace of HUF_WORKSPACE_SIZE bytes */

    /* streaming */
    char*  inBuff;
    size_t inBuffSize;
    size_t inToCompress;
    size_t inBuffPos;
    size_t inBuffTarget;
    char*  outBuff;
    size_t outBuffSize;
    size_t outBuffContentSize;
    size_t outBuffFlushedSize;
    ZSTD_cStreamStage streamStage;
    U32    frameEnded;

    /* Dictionary */
    ZSTD_CDict* cdictLocal;
    const ZSTD_CDict* cdict;
    ZSTD_prefixDict prefixDict;    /* single-usage dictionary */

    /* Multi-threading */
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_CCtx* mtctx;
#endif
};


typedef size_t (*ZSTD_blockCompressor) (
        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);


MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
{
    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
                                       8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 16, 17, 17, 18, 18, 19, 19,
                                      20, 20, 20, 20, 21, 21, 21, 21,
                                      22, 22, 22, 22, 22, 22, 22, 22,
                                      23, 23, 23, 23, 23, 23, 23, 23,
                                      24, 24, 24, 24, 24, 24, 24, 24,
                                      24, 24, 24, 24, 24, 24, 24, 24 };
    static const U32 LL_deltaCode = 19;
    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
}
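
/* Worked example (illustrative, not part of the original source) :
 * litLength 12 -> LL_Code[12] = 12 (lengths 0-15 map to themselves);
 * litLength 70 -> 70 > 63, so ZSTD_highbit32(70) + 19 = 6 + 19 = 25. */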

/* ZSTD_MLcode() :
 * note : mlBase = matchLength - MINMATCH,
 * because that's the format in which it's stored in seqStore->sequences */
MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
{
    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
    static const U32 ML_deltaCode = 36;
    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
}

/*! ZSTD_storeSeq() :
 *  Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
 *  `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
 *  `mlBase` : matchLength - MINMATCH
 */
MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
{
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6)
    static const BYTE* g_start = NULL;
    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
        DEBUGLOG(6, "Cpos%7u :%3u literals, match%3u bytes at dist.code%7u",
                 pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
    }
#endif
    /* copy Literals */
    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB);
    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
    seqStorePtr->lit += litLength;

    /* literal Length */
    if (litLength>0xFFFF) {
        assert(seqStorePtr->longLengthID == 0);  /* there can only be a single long length */
        seqStorePtr->longLengthID = 1;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].litLength = (U16)litLength;

    /* match offset */
    seqStorePtr->sequences[0].offset = offsetCode + 1;

    /* match Length */
    if (mlBase>0xFFFF) {
        assert(seqStorePtr->longLengthID == 0);  /* there can only be a single long length */
        seqStorePtr->longLengthID = 2;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].matchLength = (U16)mlBase;

    seqStorePtr->sequences++;
}


/*-*************************************
 * Match length counter
 ***************************************/
static unsigned ZSTD_NbCommonBytes (size_t val)
{
    if (MEM_isLittleEndian()) {
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
                                                     0, 3, 1, 3, 1, 4, 2, 7,
                                                     0, 2, 3, 6, 1, 5, 3, 5,
                                                     1, 3, 4, 4, 2, 5, 6, 7,
                                                     7, 0, 1, 2, 3, 3, 4, 6,
                                                     2, 6, 5, 5, 3, 4, 5, 6,
                                                     7, 1, 2, 4, 6, 4, 4, 5,
                                                     7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r=0;
            _BitScanForward( &r, (U32)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
                                                     3, 2, 2, 1, 3, 2, 0, 1,
                                                     3, 3, 1, 2, 2, 2, 2, 0,
                                                     3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    } else {  /* Big Endian CPU */
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_clzll(val) >> 3);
#       else
            unsigned r;
            const unsigned n32 = sizeof(size_t)*4;  /* calculate this way due to compiler complaining in 32-bits mode */
            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clz((U32)val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
    }   }
}
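
/* Worked example (illustrative, not part of the original source) :
 * on a little-endian 64-bit CPU, val = 0x0000000000FF0000 has 16 trailing
 * zero bits, so ZSTD_NbCommonBytes() returns 16 >> 3 = 2 : the two reads
 * whose XOR produced `val` agree on their first 2 bytes. */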
|
||||
|
||||
|
||||
MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
|
||||
{
|
||||
const BYTE* const pStart = pIn;
|
||||
const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
|
||||
|
||||
if (pIn < pInLoopLimit) {
|
||||
{ size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
|
||||
if (diff) return ZSTD_NbCommonBytes(diff); }
|
||||
pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
|
||||
while (pIn < pInLoopLimit) {
|
||||
size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
|
||||
if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
|
||||
pIn += ZSTD_NbCommonBytes(diff);
|
||||
return (size_t)(pIn - pStart);
|
||||
} }
|
||||
if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
|
||||
if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
|
||||
if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
|
||||
return (size_t)(pIn - pStart);
|
||||
}

/** ZSTD_count_2segments() :
 *  can count match length with `ip` & `match` in 2 different segments.
 *  convention : on reaching mEnd, match count continues, starting from iStart
 */
MEM_STATIC size_t
ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
{
    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
    size_t const matchLength = ZSTD_count(ip, match, vEnd);
    if (match + matchLength != mEnd) return matchLength;
    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
}


/*-*************************************
 *  Hashes
 ***************************************/
static const U32 prime3bytes = 506832829U;
static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */

static const U32 prime4bytes = 2654435761U;
static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }

static const U64 prime5bytes = 889523592379ULL;
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }

static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }

static const U64 prime7bytes = 58295818150454627ULL;
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }

static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }

MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
    switch(mls)
    {
    default:
    case 4: return ZSTD_hash4Ptr(p, hBits);
    case 5: return ZSTD_hash5Ptr(p, hBits);
    case 6: return ZSTD_hash6Ptr(p, hBits);
    case 7: return ZSTD_hash7Ptr(p, hBits);
    case 8: return ZSTD_hash8Ptr(p, hBits);
    }
}
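/* [Editor's note] Illustrative sketch, not upstream code: each ZSTD_hashN
 * above is a multiplicative (Fibonacci-style) hash. The input bytes are
 * left-aligned, multiplied by a large odd constant, and the top h bits of the
 * product are kept as the table index. A standalone 4-byte variant: */
#if 0
#include <stdint.h>
static uint32_t hash4_example(uint32_t fourBytes, uint32_t tableLog)
{
    /* same constant as prime4bytes above; keeps the top tableLog bits */
    return (fourBytes * 2654435761U) >> (32 - tableLog);
}
/* e.g. tableLog==17 yields an index in [0, 1<<17), fitting a 2^17-entry table */
#endif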

/*-*************************************
 *  Round buffer management
 ***************************************/
/* Max current allowed */
#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
/* Maximum chunk size before overflow correction needs to be called again */
#define ZSTD_CHUNKSIZE_MAX                                                     \
    ( ((U32)-1)                  /* Maximum ending current index */            \
    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */

/**
 * ZSTD_window_clear():
 * Clears the window containing the history by simply setting it to empty.
 */
MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
{
    size_t const endT = (size_t)(window->nextSrc - window->base);
    U32 const end = (U32)endT;

    window->lowLimit = end;
    window->dictLimit = end;
}

/**
 * ZSTD_window_hasExtDict():
 * Returns non-zero if the window has a non-empty extDict.
 */
MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
{
    return window.lowLimit < window.dictLimit;
}

/**
 * ZSTD_window_needOverflowCorrection():
 * Returns non-zero if the indices are getting too large and need overflow
 * protection.
 */
MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
                                                  void const* srcEnd)
{
    U32 const current = (U32)((BYTE const*)srcEnd - window.base);
    return current > ZSTD_CURRENT_MAX;
}

/**
 * ZSTD_window_correctOverflow():
 * Reduces the indices to protect from index overflow.
 * Returns the correction made to the indices, which must be applied to every
 * stored index.
 *
 * The least significant cycleLog bits of the indices must remain the same,
 * which may be 0. Every index up to maxDist in the past must be valid.
 * NOTE: (maxDist & cycleMask) must be zero.
 */
MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
                                           U32 maxDist, void const* src)
{
    /* preemptive overflow correction:
     * 1. correction is large enough:
     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
     *
     *    current - newCurrent
     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
     *    > (3<<29) - (1<<chainLog)
     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
     *    > 1<<29
     *
     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
     *    After correction, current is less than (1<<chainLog + 1<<windowLog).
     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
     *    In 32-bit mode we are safe, because (chainLog <= 29), so
     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
     */
    U32 const cycleMask = (1U << cycleLog) - 1;
    U32 const current = (U32)((BYTE const*)src - window->base);
    U32 const newCurrent = (current & cycleMask) + maxDist;
    U32 const correction = current - newCurrent;
    assert((maxDist & cycleMask) == 0);
    assert(current > newCurrent);
    /* Loose bound, should be around 1<<29 (see above) */
    assert(correction > 1<<28);

    window->base += correction;
    window->dictBase += correction;
    window->lowLimit -= correction;
    window->dictLimit -= correction;

    DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
             window->lowLimit);
    return correction;
}
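/* [Editor's note] Worked example with illustrative numbers, not from the
 * source: with cycleLog == 17, maxDist == 1<<17 and current == 0x60001234,
 * cycleMask == 0x1FFFF, so newCurrent == (0x60001234 & 0x1FFFF) + 0x20000
 * == 0x1234 + 0x20000 == 0x21234, and correction == 0x5FFE0000. The
 * correction is a multiple of the cycle, so the low 17 bits of every stored
 * index are unchanged and the hash-chain geometry survives the rebase. */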

/**
 * ZSTD_window_enforceMaxDist():
 * Updates lowLimit so that:
 *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
 * This allows a simple check that index >= lowLimit to see if index is valid.
 * This must be called before a block compression call, with srcEnd as the block
 * source end.
 * If loadedDictEndPtr is not NULL, we set it to zero once we update lowLimit.
 * This is because dictionaries are allowed to be referenced as long as the last
 * byte of the dictionary is in the window, but once they are out of range,
 * they cannot be referenced. If loadedDictEndPtr is NULL, we use
 * loadedDictEnd == 0.
 */
MEM_STATIC void ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
                                           void const* srcEnd, U32 maxDist,
                                           U32* loadedDictEndPtr)
{
    U32 const current = (U32)((BYTE const*)srcEnd - window->base);
    U32 loadedDictEnd = loadedDictEndPtr != NULL ? *loadedDictEndPtr : 0;
    if (current > maxDist + loadedDictEnd) {
        U32 const newLowLimit = current - maxDist;
        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
        if (window->dictLimit < window->lowLimit) {
            DEBUGLOG(5, "Update dictLimit from %u to %u", window->dictLimit,
                     window->lowLimit);
            window->dictLimit = window->lowLimit;
        }
        if (loadedDictEndPtr)
            *loadedDictEndPtr = 0;
    }
}

/**
 * ZSTD_window_update():
 * Updates the window by appending [src, src + srcSize) to the window.
 * If it is not contiguous, the current prefix becomes the extDict, and we
 * forget about the extDict. Handles overlap of the prefix and extDict.
 * Returns non-zero if the segment is contiguous.
 */
MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
                                  void const* src, size_t srcSize)
{
    BYTE const* const ip = (BYTE const*)src;
    U32 contiguous = 1;
    /* Check if blocks follow each other */
    if (src != window->nextSrc) {
        /* not contiguous */
        size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u",
                 window->dictLimit);
        window->lowLimit = window->dictLimit;
        assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
        window->dictLimit = (U32)distanceFromBase;
        window->dictBase = window->base;
        window->base = ip - distanceFromBase;
        // ms->nextToUpdate = window->dictLimit;
        if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */
        contiguous = 0;
    }
    window->nextSrc = ip + srcSize;
    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
    if ( (ip+srcSize > window->dictBase + window->lowLimit)
       & (ip < window->dictBase + window->dictLimit)) {
        ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
        window->lowLimit = lowLimitMax;
    }
    return contiguous;
}
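/* [Editor's note] Illustrative layout, not upstream text: after a
 * discontiguous update, indices partition as
 *   [ 0 .. lowLimit )         : no longer searchable
 *   [ lowLimit .. dictLimit ) : extDict  (addressed via dictBase)
 *   [ dictLimit .. current )  : prefix   (addressed via base)
 * which is why the match code selects dictBase vs base by comparing a match
 * index against dictLimit. */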

#if defined (__cplusplus)
}
#endif


/* ==============================================================
 * Private declarations
 * These prototypes shall only be called from within lib/compress
 * ============================================================== */

/* ZSTD_getCParamsFromCCtxParams() :
 * cParams are built depending on compressionLevel, src size hints,
 * LDM and manually set compression parameters.
 */
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);

/*! ZSTD_initCStream_internal() :
 *  Private use only. Init streaming operation.
 *  expects params to be valid.
 *  must receive dict, or cdict, or none, but not both.
 *  @return : 0, or an error code */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                     const void* dict, size_t dictSize,
                     const ZSTD_CDict* cdict,
                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);

/*! ZSTD_compressStream_generic() :
 *  Private use only. To be called from zstdmt_compress.c in single-thread mode. */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                                   ZSTD_outBuffer* output,
                                   ZSTD_inBuffer* input,
                                   ZSTD_EndDirective const flushMode);

/*! ZSTD_getCParamsFromCDict() :
 *  as the name implies */
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);

/* ZSTD_compressBegin_advanced_internal() :
 * Private use only. To be called from zstdmt_compress.c. */
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    const ZSTD_CDict* cdict,
                                    ZSTD_CCtx_params params,
                                    unsigned long long pledgedSrcSize);

/* ZSTD_compress_advanced_internal() :
 * Private use only. To be called from zstdmt_compress.c. */
size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const void* dict, size_t dictSize,
                                       ZSTD_CCtx_params params);


/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);


/* ZSTD_referenceExternalSequences() :
 * Must be called before starting a compression operation.
 * seqs must parse a prefix of the source.
 * This cannot be used when long range matching is enabled.
 * Zstd will use these sequences, and pass the literals to a secondary block
 * compressor.
 * @return : An error code on failure.
 * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
 * access and data corruption.
 */
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);


#endif /* ZSTD_COMPRESS_H */
3003
third/github.com/DataDog/zstd/zstd_decompress.c
Normal file
File diff suppressed because it is too large
327
third/github.com/DataDog/zstd/zstd_double_fast.c
Normal file
@ -0,0 +1,327 @@

/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"


void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
                              ZSTD_compressionParameters const* cParams,
                              void const* end)
{
    U32* const hashLarge = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32  const mls = cParams->searchLength;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash tables.
     * Insert the other positions into the large hash table if their entry
     * is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const current = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
            if (i == 0)
                hashSmall[smHash] = current + i;
            if (i == 0 || hashLarge[lgHash] == 0)
                hashLarge[lgHash] = current + i;
        }
    }
}


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    U32* const hashLong = ms->hashTable;
    const U32 hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    const U32 hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 lowestIndex = ms->window.dictLimit;
    const BYTE* const lowest = base + lowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;

    /* init */
    ip += (ip==lowest);
    {   U32 const maxRep = (U32)(ip-lowest);
        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    }

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
        U32 const current = (U32)(ip-base);
        U32 const matchIndexL = hashLong[h2];
        U32 const matchIndexS = hashSmall[h];
        const BYTE* matchLong = base + matchIndexL;
        const BYTE* match = base + matchIndexS;
        hashLong[h2] = hashSmall[h] = current;   /* update hash tables */

        assert(offset_1 <= current);   /* supposed guaranteed by construction */
        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
            /* favor repcode */
            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
        } else {
            U32 offset;
            if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
                offset = (U32)(ip-matchLong);
                while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
            } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
                size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                U32 const matchIndexL3 = hashLong[hl3];
                const BYTE* matchL3 = base + matchIndexL3;
                hashLong[hl3] = current + 1;
                if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) {
                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
                    ip++;
                    offset = (U32)(ip-matchL3);
                    while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
                } else {
                    mLength = ZSTD_count(ip+4, match+4, iend) + 4;
                    offset = (U32)(ip-match);
                    while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
                }
            } else {
                ip += ((ip-anchor) >> kSearchStrength) + 1;
                continue;
            }

            offset_2 = offset_1;
            offset_1 = offset;

            ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }

        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
                hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;  /* here because current+2 could be > iend-8 */
            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
                hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);

            /* check immediate repcode */
            while ( (ip <= ilimit)
                 && ( (offset_2>0)
                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
                ip += rLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_doubleFast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    const U32 mls = cParams->searchLength;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
    }
}
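/* [Editor's note] Illustrative note, not upstream text: the switch above
 * exists so that mls reaches ZSTD_compressBlock_doubleFast_generic as a
 * compile-time constant; with FORCE_INLINE_TEMPLATE the compiler emits one
 * specialized body per match length, conceptually
 *   case 5  ->  ZSTD_hashPtr(p, hBits, 5)  ->  ZSTD_hash5Ptr(p, hBits)
 * with the inner switch in ZSTD_hashPtr folded away. */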


static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    U32* const hashLong = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   lowestIndex = ms->window.lowLimit;
    const BYTE* const dictStart = dictBase + lowestIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
        const U32 matchIndex = hashSmall[hSmall];
        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;

        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
        const U32 matchLongIndex = hashLong[hLong];
        const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
        const BYTE* matchLong = matchLongBase + matchLongIndex;

        const U32 current = (U32)(ip-base);
        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
        const BYTE* repMatch = repBase + repIndex;
        size_t mLength;
        hashSmall[hSmall] = hashLong[hLong] = current;   /* update hash table */

        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
        } else {
            if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
                const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
                offset = current - matchLongIndex;
                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);

            } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                U32 const matchIndex3 = hashLong[h3];
                const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
                const BYTE* match3 = match3Base + matchIndex3;
                U32 offset;
                hashLong[h3] = current + 1;
                if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
                    const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
                    const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
                    ip++;
                    offset = current+1 - matchIndex3;
                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
                } else {
                    const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
                    const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
                    offset = current - matchIndex;
                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                }
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);

            } else {
                ip += ((ip-anchor) >> kSearchStrength) + 1;
                continue;
        }   }

        /* found a match : store it */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
            hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_doubleFast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    U32 const mls = cParams->searchLength;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
    }
}
36
third/github.com/DataDog/zstd/zstd_double_fast.h
Normal file
@ -0,0 +1,36 @@

/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_DOUBLE_FAST_H
#define ZSTD_DOUBLE_FAST_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "mem.h"      /* U32 */
#include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */

void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
                              ZSTD_compressionParameters const* cParams,
                              void const* end);
size_t ZSTD_compressBlock_doubleFast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_DOUBLE_FAST_H */
92
third/github.com/DataDog/zstd/zstd_errors.h
Normal file
@ -0,0 +1,92 @@

/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_ERRORS_H_398273423
#define ZSTD_ERRORS_H_398273423

#if defined (__cplusplus)
extern "C" {
#endif

/*===== dependency =====*/
#include <stddef.h>   /* size_t */


/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
#ifndef ZSTDERRORLIB_VISIBILITY
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
#  else
#    define ZSTDERRORLIB_VISIBILITY
#  endif
#endif
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
#endif

/*-*********************************************
 *  Error codes list
 *-*********************************************
 *  Error codes _values_ are pinned down since v1.3.1 only.
 *  Therefore, don't rely on values if you may link to any version < v1.3.1.
 *
 *  Only values < 100 are considered stable.
 *
 *  note 1 : this API shall be used with static linking only.
 *           dynamic linking is not yet officially supported.
 *  note 2 : Prefer relying on the enum than on its value whenever possible
 *           This is the only supported way to use the error list < v1.3.1
 *  note 3 : ZSTD_isError() is always correct, whatever the library version.
 **********************************************/
typedef enum {
  ZSTD_error_no_error = 0,
  ZSTD_error_GENERIC  = 1,
  ZSTD_error_prefix_unknown = 10,
  ZSTD_error_version_unsupported = 12,
  ZSTD_error_frameParameter_unsupported = 14,
  ZSTD_error_frameParameter_windowTooLarge = 16,
  ZSTD_error_corruption_detected = 20,
  ZSTD_error_checksum_wrong = 22,
  ZSTD_error_dictionary_corrupted = 30,
  ZSTD_error_dictionary_wrong = 32,
  ZSTD_error_dictionaryCreation_failed = 34,
  ZSTD_error_parameter_unsupported = 40,
  ZSTD_error_parameter_outOfBound = 42,
  ZSTD_error_tableLog_tooLarge = 44,
  ZSTD_error_maxSymbolValue_tooLarge = 46,
  ZSTD_error_maxSymbolValue_tooSmall = 48,
  ZSTD_error_stage_wrong = 60,
  ZSTD_error_init_missing = 62,
  ZSTD_error_memory_allocation = 64,
  ZSTD_error_workSpace_tooSmall = 66,
  ZSTD_error_dstSize_tooSmall = 70,
  ZSTD_error_srcSize_wrong = 72,
  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
  ZSTD_error_frameIndex_tooLarge = 100,
  ZSTD_error_seekableIO = 102,
  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;

/*! ZSTD_getErrorCode() :
    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
    which can be used to compare with enum list published above */
ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_ERRORS_H_398273423 */
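/* [Editor's note] Illustrative usage sketch, not part of the header: with
 * static linking, a caller can map a failed size_t result from the public API
 * back to one of the enum values above. */
#if 0
#include <stdio.h>
#include "zstd.h"        /* ZSTD_compress, ZSTD_isError, ZSTD_getErrorName */
#include "zstd_errors.h" /* ZSTD_getErrorCode, ZSTD_ErrorCode */

static void demo(const void* src, size_t srcSize, void* dst, size_t dstCapacity)
{
    size_t const r = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
    if (ZSTD_isError(r)) {
        ZSTD_ErrorCode const code = ZSTD_getErrorCode(r);
        if (code == ZSTD_error_dstSize_tooSmall)   /* stable value (< 100) */
            fprintf(stderr, "buffer too small: %s\n", ZSTD_getErrorName(r));
    }
}
#endif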
259
third/github.com/DataDog/zstd/zstd_fast.c
Normal file
@ -0,0 +1,259 @@

/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_fast.h"


void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        ZSTD_compressionParameters const* cParams,
                        void const* end)
{
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog;
    U32  const mls = cParams->searchLength;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const current = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const hash = ZSTD_hashPtr(ip + i, hBits, mls);
            if (i == 0 || hashTable[hash] == 0)
                hashTable[hash] = current + i;
        }
    }
}

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const hlog, U32 const stepSize, U32 const mls)
{
    U32* const hashTable = ms->hashTable;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 lowestIndex = ms->window.dictLimit;
    const BYTE* const lowest = base + lowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;

    /* init */
    ip += (ip==lowest);
    {   U32 const maxRep = (U32)(ip-lowest);
        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    }

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
        U32 const current = (U32)(ip-base);
        U32 const matchIndex = hashTable[h];
        const BYTE* match = base + matchIndex;
        hashTable[h] = current;   /* update hash table */

        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
        } else {
            if ( (matchIndex <= lowestIndex)
              || (MEM_read32(match) != MEM_read32(ip)) ) {
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            }
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            {   U32 const offset = (U32)(ip-match);
                while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }   }

        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while ( (ip <= ilimit)
                 && ( (offset_2>0)
                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }  /* swap offset_2 <=> offset_1 */
                hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
                ip += rLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    U32 const hlog = cParams->hashLog;
    U32 const mls = cParams->searchLength;
    U32 const stepSize = cParams->targetLength;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
    }
}


static size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const hlog, U32 const stepSize, U32 const mls)
{
    U32* hashTable = ms->hashTable;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   lowestIndex = ms->window.lowLimit;
    const BYTE* const dictStart = dictBase + lowestIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
        const U32 matchIndex = hashTable[h];
        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;
        const U32 current = (U32)(ip-base);
        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
        const BYTE* repMatch = repBase + repIndex;
        size_t mLength;
        hashTable[h] = current;   /* update hash table */

        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
        } else {
            if ( (matchIndex < lowestIndex) ||
                 (MEM_read32(match) != MEM_read32(ip)) ) {
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            }
            {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
                const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                offset = current - matchIndex;
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }   }

        /* found a match : store it */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    U32 const hlog = cParams->hashLog;
    U32 const mls = cParams->searchLength;
    U32 const stepSize = cParams->targetLength;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
    }
}
35
third/github.com/DataDog/zstd/zstd_fast.h
Normal file
@ -0,0 +1,35 @@

/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_FAST_H
#define ZSTD_FAST_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "mem.h"      /* U32 */
#include "zstd_compress_internal.h"

void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        ZSTD_compressionParameters const* cParams,
                        void const* end);
size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_FAST_H */
290
third/github.com/DataDog/zstd/zstd_internal.h
Normal file
@ -0,0 +1,290 @@

/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE

/* this module contains definitions which must be identical
 * across compression, decompression and dictBuilder.
 * It also contains a few functions useful to at least 2 of them
 * and which benefit from being inlined */

/*-*************************************
 *  Dependencies
 ***************************************/
#include "compiler.h"
#include "mem.h"
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY   /* XXH64_state_t */
#endif
#include "xxhash.h"                 /* XXH_reset, update, digest */


#if defined (__cplusplus)
extern "C" {
#endif


/*-*************************************
 *  Debug
 ***************************************/
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
#  include <assert.h>
#else
#  ifndef assert
#    define assert(condition) ((void)0)
#  endif
#endif

#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }

#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
#  include <stdio.h>
extern int g_debuglog_enable;
/* recommended values for ZSTD_DEBUG display levels :
 * 1 : no display, enables assert() only
 * 2 : reserved for currently active debug path
 * 3 : events once per object lifetime (CCtx, CDict, etc.)
 * 4 : events once per frame
 * 5 : events once per block
 * 6 : events once per sequence (*very* verbose) */
#  define RAWLOG(l, ...) {                                 \
       if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {        \
           fprintf(stderr, __VA_ARGS__);                   \
       }   }
#  define DEBUGLOG(l, ...) {                               \
       if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {        \
           fprintf(stderr, __FILE__ ": " __VA_ARGS__);     \
           fprintf(stderr, " \n");                         \
       }   }
#else
#  define RAWLOG(l, ...)      {}    /* disabled */
#  define DEBUGLOG(l, ...)    {}    /* disabled */
#endif


/*-*************************************
 *  shared macros
 ***************************************/
#undef MIN
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; }  /* check and Forward error code */
#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); }  /* check and send Error code */


/*-*************************************
 *  Common constants
 ***************************************/
#define ZSTD_OPT_NUM    (1<<12)

#define ZSTD_REP_NUM      3                 /* number of repcodes */
#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define BIT7 128
#define BIT6  64
#define BIT5  32
#define BIT4  16
#define BIT1   2
#define BIT0   1

#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
#define ZSTD_WINDOWLOG_DEFAULTMAX 27 /* Default maximum allowed window log */
static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };

#define ZSTD_FRAMEIDSIZE 4
static const size_t ZSTD_frameIdSize = ZSTD_FRAMEIDSIZE;  /* magic number size */

#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;

#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */

#define HufLog 12
typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;

#define LONGNBSEQ 0x7F00

#define MINMATCH 3

#define Litbits  8
#define MaxLit ((1<<Litbits) - 1)
#define MaxML  52
#define MaxLL  35
#define DefaultMaxOff 28
#define MaxOff 31
#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog    9
#define LLFSELog    9
#define OffFSELog   8
#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)

static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0,
                                      1, 1, 1, 1, 2, 2, 3, 3,
                                      4, 6, 7, 8, 9,10,11,12,
                                     13,14,15,16 };
static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2,
                                             2, 2, 2, 2, 2, 1, 1, 1,
                                             2, 2, 2, 2, 2, 2, 2, 2,
                                             2, 3, 2, 1, 1, 1, 1, 1,
                                            -1,-1,-1,-1 };
#define LL_DEFAULTNORMLOG 6  /* for static allocation */
static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;

static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0,
                                      1, 1, 1, 1, 2, 2, 3, 3,
                                      4, 4, 5, 7, 8, 9,10,11,
                                     12,13,14,15,16 };
static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2,
                                             2, 1, 1, 1, 1, 1, 1, 1,
                                             1, 1, 1, 1, 1, 1, 1, 1,
                                             1, 1, 1, 1, 1, 1, 1, 1,
                                             1, 1, 1, 1, 1, 1, 1, 1,
                                             1, 1, 1, 1, 1, 1,-1,-1,
                                            -1,-1,-1,-1,-1 };
#define ML_DEFAULTNORMLOG 6  /* for static allocation */
static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;

static const S16 OF_defaultNorm[DefaultMaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2,
                                                     2, 1, 1, 1, 1, 1, 1, 1,
                                                     1, 1, 1, 1, 1, 1, 1, 1,
                                                    -1,-1,-1,-1,-1 };
#define OF_DEFAULTNORMLOG 5  /* for static allocation */
static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;


/*-*******************************************
 *  Shared functions to include for inlining
 *********************************************/
static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }

/*! ZSTD_wildcopy() :
 *  custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */
#define WILDCOPY_OVERLENGTH 8
MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + length;
    do
        COPY8(op, ip)
    while (op < oend);
}

MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd)   /* should be faster for decoding, but strangely, not verified on all platform */
{
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = (BYTE*)dstEnd;
    do
        COPY8(op, ip)
    while (op < oend);
}


/*-*******************************************
 *  Private declarations
 *********************************************/
typedef struct seqDef_s {
    U32 offset;
    U16 litLength;
    U16 matchLength;
} seqDef;

typedef struct {
    seqDef* sequencesStart;
    seqDef* sequences;
    BYTE* litStart;
    BYTE* lit;
    BYTE* llCode;
    BYTE* mlCode;
    BYTE* ofCode;
    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
    U32   longLengthPos;
} seqStore_t;

const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */

/* custom memory allocation functions */
void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
void* ZSTD_calloc(size_t size, ZSTD_customMem customMem);
void ZSTD_free(void* ptr, ZSTD_customMem customMem);


MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
{
    assert(val != 0);
    {
#   if defined(_MSC_VER)   /* Visual */
        unsigned long r=0;
        _BitScanReverse(&r, val);
        return (unsigned)r;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
        return 31 - __builtin_clz(val);
#   else   /* Software version */
        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
        U32 v = val;
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
#   endif
    }
}
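/* [Editor's note] Worked example, not upstream text: ZSTD_highbit32 returns
 * the position of the most significant set bit, i.e. floor(log2(val)):
 * highbit32(1)==0, highbit32(3)==1, highbit32(0x80000000)==31. The software
 * fallback smears the top bit rightward into an all-ones mask, then uses the
 * De Bruijn multiply as a 32-entry perfect hash of that mask. */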


/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */


typedef struct {
    blockType_e blockType;
    U32 lastBlock;
    U32 origSize;
} blockProperties_t;

/*! ZSTD_getcBlockSize() :
 *  Provides the size of compressed block from block header `src` */
/*  Used by: decompress, fullbench (does not get its definition from here) */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                          blockProperties_t* bpPtr);

#if defined (__cplusplus)
}
#endif

#endif   /* ZSTD_CCOMMON_H_MODULE */
824
third/github.com/DataDog/zstd/zstd_lazy.c
Normal file
@ -0,0 +1,824 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_lazy.h"


/*-*************************************
*  Binary Tree search
***************************************/

void ZSTD_updateDUBT(
                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                const BYTE* ip, const BYTE* iend,
                U32 mls)
{
    U32* const hashTable = ms->hashTable;
    U32  const hashLog = cParams->hashLog;

    U32* const bt = ms->chainTable;
    U32  const btLog  = cParams->chainLog - 1;
    U32  const btMask = (1 << btLog) - 1;

    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    if (idx != target)
        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
                    idx, target, ms->window.dictLimit);
    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
    (void)iend;

    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
    for ( ; idx < target ; idx++) {
        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
        U32    const matchIndex = hashTable[h];

        U32*   const nextCandidatePtr = bt + 2*(idx&btMask);
        U32*   const sortMarkPtr  = nextCandidatePtr + 1;

        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
        hashTable[h] = idx;   /* Update Hash Table */
        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
    }
    ms->nextToUpdate = target;
}
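/* Illustration (editor's sketch, not part of upstream zstd): the DUBT keeps
 * two U32 slots per indexed position p in the chainTable:
 *
 *     bt[2*(p & btMask)]     : while unsorted, the next older position with
 *                              the same hash (a plain chain link); once
 *                              sorted, the root of the "smaller" subtree
 *     bt[2*(p & btMask) + 1] : ZSTD_DUBT_UNSORTED_MARK while unsorted; once
 *                              sorted, the root of the "larger" subtree
 *
 * ZSTD_updateDUBT() therefore only prepends to the hash-head chain in O(1);
 * the costly tree insertion is deferred to ZSTD_insertDUBT1(), which runs
 * lazily when a search actually visits the candidates. */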

/** ZSTD_insertDUBT1() :
 *  sort one already inserted but unsorted position
 *  assumption : current >= btlow == (current - btmask)
 *  doesn't fail */
static void ZSTD_insertDUBT1(
                 ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                 U32 current, const BYTE* inputEnd,
                 U32 nbCompares, U32 btLow, int extDict)
{
    U32* const bt = ms->chainTable;
    U32  const btLog  = cParams->chainLog - 1;
    U32  const btMask = (1 << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
    const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 matchIndex = *smallerPtr;
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = ms->window.lowLimit;

    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
                current, dictLimit, windowLow);
    assert(current >= btLow);
    assert(ip < iend);   /* condition for ZSTD_count */

    while (nbCompares-- && (matchIndex > windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < current);

        if ( (!extDict)
          || (matchIndex+matchLength >= dictLimit)   /* both in current segment */
          || (current < dictLimit) /* both in extDict */) {
            const BYTE* const mBase = !extDict || ((matchIndex+matchLength) >= dictLimit) ? base : dictBase;
            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
                 || (current < dictLimit) );
            match = mBase + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
                    current, matchIndex, (U32)matchLength);

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
        }

        if (match[matchLength] < ip[matchLength]) {   /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
                        matchIndex, btLow, nextPtr[1]);
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
                        matchIndex, btLow, nextPtr[0]);
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
}


static size_t ZSTD_DUBT_findBestMatch (
                            ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                            const BYTE* const ip, const BYTE* const iend,
                            size_t* offsetPtr,
                            U32 const mls,
                            U32 const extDict)
{
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32          matchIndex  = hashTable[h];

    const BYTE* const base = ms->window.base;
    U32    const current = (U32)(ip-base);
    U32    const windowLow = ms->window.lowLimit;

    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32    const btLow = (btMask >= current) ? 0 : current - btMask;
    U32    const unsortLimit = MAX(btLow, windowLow);

    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
    U32          nbCompares = 1U << cParams->searchLog;
    U32          nbCandidates = nbCompares;
    U32          previousCandidate = 0;

    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
    assert(ip <= iend-8);   /* required for h calculation */

    /* reach end of unsorted candidates list */
    while ( (matchIndex > unsortLimit)
         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
         && (nbCandidates > 1) ) {
        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
                    matchIndex);
        *unsortedMark = previousCandidate;
        previousCandidate = matchIndex;
        matchIndex = *nextCandidate;
        nextCandidate = bt + 2*(matchIndex&btMask);
        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
        nbCandidates --;
    }

    if ( (matchIndex > unsortLimit)
      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
                    matchIndex);
        *nextCandidate = *unsortedMark = 0;   /* nullify next candidate if it's still unsorted (note : simplification, detrimental to compression ratio, beneficial for speed) */
    }

    /* batch sort stacked candidates */
    matchIndex = previousCandidate;
    while (matchIndex) {   /* will end on matchIndex == 0 */
        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
        ZSTD_insertDUBT1(ms, cParams, matchIndex, iend,
                         nbCandidates, unsortLimit, extDict);
        matchIndex = nextCandidateIdx;
        nbCandidates++;
    }

    /* find longest match */
    {   size_t commonLengthSmaller=0, commonLengthLarger=0;
        const BYTE* const dictBase = ms->window.dictBase;
        const U32 dictLimit = ms->window.dictLimit;
        const BYTE* const dictEnd = dictBase + dictLimit;
        const BYTE* const prefixStart = base + dictLimit;
        U32* smallerPtr = bt + 2*(current&btMask);
        U32* largerPtr  = bt + 2*(current&btMask) + 1;
        U32 matchEndIdx = current+8+1;
        U32 dummy32;   /* to be nullified at the end */
        size_t bestLength = 0;

        matchIndex  = hashTable[h];
        hashTable[h] = current;   /* Update Hash Table */

        while (nbCompares-- && (matchIndex > windowLow)) {
            U32* const nextPtr = bt + 2*(matchIndex & btMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match;

            if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
                match = base + matchIndex;
                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
            } else {
                match = dictBase + matchIndex;
                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
                if (matchIndex+matchLength >= dictLimit)
                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
            }

            if (matchLength > bestLength) {
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                }
            }

            if (match[matchLength] < ip[matchLength]) {
                /* match is smaller than current */
                *smallerPtr = matchIndex;             /* update smaller idx */
                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                *largerPtr = matchIndex;
                commonLengthLarger = matchLength;
                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                largerPtr = nextPtr;
                matchIndex = nextPtr[0];
        }   }

        *smallerPtr = *largerPtr = 0;

        assert(matchEndIdx > current+8);   /* ensure nextToUpdate is increased */
        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
        if (bestLength >= MINMATCH) {
            U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                        current, (U32)bestLength, (U32)*offsetPtr, mIndex);
        }
        return bestLength;
    }
}
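/* Interpretation (editor's note, not upstream): the selection rule above,
 *
 *     4*(matchLength - bestLength) > highbit32(newOffset+1) - highbit32(oldOffset+1)
 *
 * trades match length against offset cost: each extra matched byte is worth
 * roughly 4 bits, while an offset costs roughly log2(offset) bits to encode.
 * E.g. a candidate 2 bytes longer (gain ~8) is still accepted if its offset
 * grows from ~2^10 to ~2^16 (cost +6), but not if it grows to ~2^20 (+10). */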


/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
static size_t ZSTD_BtFindBestMatch (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls /* template */)
{
    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
    return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 0);
}


static size_t ZSTD_BtFindBestMatch_selectMLS (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(cParams->searchLength)
    {
    default :   /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 4);
    case 5 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 5);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 6);
    }
}


/** Tree updater, providing best match */
static size_t ZSTD_BtFindBestMatch_extDict (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls)
{
    DEBUGLOG(7, "ZSTD_BtFindBestMatch_extDict");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
    return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 1);
}


static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(cParams->searchLength)
    {
    default :   /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 4);
    case 5 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 5);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 6);
    }
}



/* *********************************
*  Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & mask]

/* Update chains up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndex_internal(
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, U32 const mls)
{
    U32* const hashTable  = ms->hashTable;
    const U32 hashLog = cParams->hashLog;
    U32* const chainTable = ms->chainTable;
    const U32 chainMask = (1 << cParams->chainLog) - 1;
    const BYTE* const base = ms->window.base;
    const U32 target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    while(idx < target) {   /* catch up */
        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
        hashTable[h] = idx;
        idx++;
    }

    ms->nextToUpdate = target;
    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}
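/* Illustration (editor's sketch, not upstream): unlike the DUBT, the hash
 * chain is a plain singly linked list threaded through chainTable:
 *
 *     head = hashTable[ZSTD_hashPtr(ip, hashLog, mls)];   /- newest position -/
 *     next = chainTable[head & chainMask];                /- older position  -/
 *
 * Each position costs one U32 (versus two for the binary tree) and insertion
 * is O(1); the price is a linear walk at search time, capped by searchLog. */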

U32 ZSTD_insertAndFindFirstIndex(
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip)
{
    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, cParams->searchLength);
}


/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch_generic (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const U32 extDict)
{
    U32* const chainTable = ms->chainTable;
    const U32 chainSize = (1 << cParams->chainLog);
    const U32 chainMask = chainSize-1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const U32 lowLimit = ms->window.lowLimit;
    const U32 current = (U32)(ip-base);
    const U32 minChain = current > chainSize ? current - chainSize : 0;
    U32 nbAttempts = 1U << cParams->searchLog;
    size_t ml=4-1;

    /* HC4 match finder */
    U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);

    for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
        size_t currentMl=0;
        if ((!extDict) || matchIndex >= dictLimit) {
            const BYTE* const match = base + matchIndex;
            if (match[ml] == ip[ml])   /* potentially better */
                currentMl = ZSTD_count(ip, match, iLimit);
        } else {
            const BYTE* const match = dictBase + matchIndex;
            assert(match+4 <= dictEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
            if (ip+currentMl == iLimit) break;   /* best possible, avoids read overflow on next attempt */
        }

        if (matchIndex <= minChain) break;
        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    }

    return ml;
}
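/* Note (editor's, not upstream): ml starts at 4-1 = 3, one below the minimum
 * match length, so "no match found" surfaces to the caller as a return value
 * below 4; a candidate only counts once it can reach at least 4 bytes, and
 * the cheap match[ml] == ip[ml] probe filters chains before full counting. */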


FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(cParams->searchLength)
    {
    default :   /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 0);
    case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 0);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 0);
    }
}


FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* const offsetPtr)
{
    switch(cParams->searchLength)
    {
    default :   /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 1);
    case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 1);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 1);
    }
}


/* *******************************
*  Common parser - lazy strategy
*********************************/
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        ZSTD_compressionParameters const* cParams,
                        const void* src, size_t srcSize,
                        const U32 searchMethod, const U32 depth)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base + ms->window.dictLimit;

    typedef size_t (*searchMax_f)(
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
    searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;

    /* init */
    ip += (ip==base);
    ms->nextToUpdate3 = ms->nextToUpdate;
    {   U32 const maxRep = (U32)(ip-base);
        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
    }

    /* Match Loop */
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offset=0;
        const BYTE* start=ip+1;

        /* check repCode */
        if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
            /* repcode : we take it */
            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            if (depth==0) goto _storeSequence;
        }

        /* first search (depth 0) */
        {   size_t offsetFound = 99999999;
            size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                int const gain2 = (int)(mlRep * 3);
                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                if ((mlRep >= 4) && (gain2 > gain1))
                    matchLength = mlRep, offset = 0, start = ip;
            }
            {   size_t offset2=99999999;
                size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                    size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                    int const gain2 = (int)(ml2 * 4);
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((ml2 >= 4) && (gain2 > gain1))
                        matchLength = ml2, offset = 0, start = ip;
                }
                {   size_t offset2=99999999;
                    size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;   /* nothing found : store previous solution */
        }

        /* NOTE:
         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
         * overflows the pointer, which is undefined behavior.
         */
        /* catch up */
        if (offset) {
            while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > base))
                 && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )   /* only search for offset within prefix */
                { start--; matchLength++; }
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }
        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        while ( ((ip <= ilimit) & (offset_2>0))
             && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
            /* store sequence */
            matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
            offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap repcodes */
            ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
            ip += matchLength;
            anchor = ip;
            continue;   /* faster when present ... (?) */
    }   }

    /* Save reps for next block */
    rep[0] = offset_1 ? offset_1 : savedOffset;
    rep[1] = offset_2 ? offset_2 : savedOffset;

    /* Return the last literals size */
    return iend - anchor;
}
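/* Overview (editor's note, not upstream): the four public entry points below
 * are thin instantiations of ZSTD_compressBlock_lazy_generic over its
 * (searchMethod, depth) template parameters:
 *
 *     greedy  : hash chain  (0), depth 0  -- take the first acceptable match
 *     lazy    : hash chain  (0), depth 1  -- re-search once at ip+1
 *     lazy2   : hash chain  (0), depth 2  -- re-search twice
 *     btlazy2 : binary tree (1), depth 2
 */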


size_t ZSTD_compressBlock_btlazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
}

size_t ZSTD_compressBlock_lazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
}

size_t ZSTD_compressBlock_lazy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
}

size_t ZSTD_compressBlock_greedy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
}


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_extDict_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        ZSTD_compressionParameters const* cParams,
                        const void* src, size_t srcSize,
                        const U32 searchMethod, const U32 depth)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32 dictLimit = ms->window.dictLimit;
    const U32 lowestIndex = ms->window.lowLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictEnd  = dictBase + dictLimit;
    const BYTE* const dictStart  = dictBase + lowestIndex;

    typedef size_t (*searchMax_f)(
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;

    U32 offset_1 = rep[0], offset_2 = rep[1];

    /* init */
    ms->nextToUpdate3 = ms->nextToUpdate;
    ip += (ip == prefixStart);

    /* Match Loop */
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offset=0;
        const BYTE* start=ip+1;
        U32 current = (U32)(ip-base);

        /* check repCode */
        {   const U32 repIndex = (U32)(current+1 - offset_1);
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
                /* repcode detected we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                if (depth==0) goto _storeSequence;
        }   }

        /* first search (depth 0) */
        {   size_t offsetFound = 99999999;
            size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            current++;
            /* check repCode */
            if (offset) {
                const U32 repIndex = (U32)(current - offset_1);
                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                const BYTE* const repMatch = repBase + repIndex;
                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
                if (MEM_read32(ip) == MEM_read32(repMatch)) {
                    /* repcode detected */
                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                    int const gain2 = (int)(repLength * 3);
                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((repLength >= 4) && (gain2 > gain1))
                        matchLength = repLength, offset = 0, start = ip;
            }   }

            /* search match, depth 1 */
            {   size_t offset2=99999999;
                size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                current++;
                /* check repCode */
                if (offset) {
                    const U32 repIndex = (U32)(current - offset_1);
                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                    const BYTE* const repMatch = repBase + repIndex;
                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
                        /* repcode detected */
                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                        int const gain2 = (int)(repLength * 4);
                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                        if ((repLength >= 4) && (gain2 > gain1))
                            matchLength = repLength, offset = 0, start = ip;
                }   }

                /* search match, depth 2 */
                {   size_t offset2=99999999;
                    size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;   /* nothing found : store previous solution */
        }

        /* catch up */
        if (offset) {
            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }   /* catch up */
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }

        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        while (ip <= ilimit) {
            const U32 repIndex = (U32)((ip-base) - offset_2);
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
            if (MEM_read32(ip) == MEM_read32(repMatch)) {
                /* repcode detected we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
                ip += matchLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
            }
            break;
    }   }

    /* Save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_greedy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
}

size_t ZSTD_compressBlock_lazy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
}

size_t ZSTD_compressBlock_lazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
}

size_t ZSTD_compressBlock_btlazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
}
56
third/github.com/DataDog/zstd/zstd_lazy.h
Normal file
@ -0,0 +1,56 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_LAZY_H
#define ZSTD_LAZY_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "zstd_compress_internal.h"

U32 ZSTD_insertAndFindFirstIndex(
        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
        const BYTE* ip);

void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);   /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */

size_t ZSTD_compressBlock_btlazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);

size_t ZSTD_compressBlock_greedy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_LAZY_H */
653
third/github.com/DataDog/zstd/zstd_ldm.c
Normal file
@ -0,0 +1,653 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 */

#include "zstd_ldm.h"

#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */

#define LDM_BUCKET_SIZE_LOG 3
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7
#define LDM_HASH_CHAR_OFFSET 10

void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams)
{
    U32 const windowLog = cParams->windowLog;
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
    if (cParams->strategy >= ZSTD_btopt) {
        /* Get out of the way of the optimal parser */
        U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength);
        assert(minMatch >= ZSTD_LDM_MINMATCH_MIN);
        assert(minMatch <= ZSTD_LDM_MINMATCH_MAX);
        params->minMatchLength = minMatch;
    }
    if (params->hashLog == 0) {
        params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG);
        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
    }
    if (params->hashEveryLog == 0) {
        params->hashEveryLog =
            windowLog < params->hashLog ? 0 : windowLog - params->hashLog;
    }
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}

size_t ZSTD_ldm_getTableSize(ldmParams_t params)
{
    size_t const ldmHSize = ((size_t)1) << params.hashLog;
    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
    size_t const ldmBucketSize =
        ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
    size_t const totalSize = ldmBucketSize + ldmHSize * sizeof(ldmEntry_t);
    return params.enableLdm ? totalSize : 0;
}

size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
{
    return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
}
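/* Worked example (editor's, not upstream): with hashLog = 20 (windowLog 27
 * minus LDM_HASH_RLOG 7, per ZSTD_ldm_adjustParameters above) and the default
 * bucketSizeLog = 3:
 *
 *     ldmHSize      = 1 << 20       = 1,048,576 entries
 *     ldmBucketSize = 1 << (20 - 3) =   131,072 bucket-offset bytes
 *     totalSize     = 131,072 + 1,048,576 * sizeof(ldmEntry_t)
 *
 * i.e. roughly 8.1 MiB assuming sizeof(ldmEntry_t) == 8 (offset + checksum). */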

/** ZSTD_ldm_getSmallHash() :
 *  numBits should be <= 32
 *  If numBits==0, returns 0.
 *  @return : the most significant numBits of value. */
static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
{
    assert(numBits <= 32);
    return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
}

/** ZSTD_ldm_getChecksum() :
 *  numBitsToDiscard should be <= 32
 *  @return : the next most significant 32 bits after numBitsToDiscard */
static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
{
    assert(numBitsToDiscard <= 32);
    return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
}

/** ZSTD_ldm_getTag() :
 *  Given the hash, returns the most significant numTagBits bits
 *  after (32 + hbits) bits.
 *
 *  If there are not enough bits remaining, return the last
 *  numTagBits bits. */
static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
{
    assert(numTagBits < 32 && hbits <= 32);
    if (32 - hbits < numTagBits) {
        return hash & (((U32)1 << numTagBits) - 1);
    } else {
        return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
    }
}
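/* Bit layout (editor's sketch, not upstream): the three accessors above carve
 * the 64-bit rolling hash into adjacent fields, most significant bits first:
 *
 *     [ small hash : hbits ][ checksum : 32 ][ tag : numTagBits ][ ... ]
 *
 * getSmallHash() returns the top hbits, getChecksum() the 32 bits just below
 * them, and getTag() the numTagBits below those; when fewer than numTagBits
 * remain (32 - hbits < numTagBits), getTag() falls back to the lowest bits. */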

/** ZSTD_ldm_getBucket() :
 *  Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
{
    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
}

/** ZSTD_ldm_insertEntry() :
 *  Insert the entry with corresponding hash into the hash table */
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
                                 size_t const hash, const ldmEntry_t entry,
                                 ldmParams_t const ldmParams)
{
    BYTE* const bucketOffsets = ldmState->bucketOffsets;
    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
    bucketOffsets[hash]++;
    bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
}

/** ZSTD_ldm_makeEntryAndInsertByTag() :
 *
 *  Gets the small hash, checksum, and tag from the rollingHash.
 *
 *  If the tag matches (1 << ldmParams.hashEveryLog)-1, then
 *  creates an ldmEntry from the offset, and inserts it into the hash table.
 *
 *  hBits is the length of the small hash, which is the most significant hBits
 *  of rollingHash. The checksum is the next 32 most significant bits, followed
 *  by ldmParams.hashEveryLog bits that make up the tag. */
static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
                                             U64 const rollingHash,
                                             U32 const hBits,
                                             U32 const offset,
                                             ldmParams_t const ldmParams)
{
    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog);
    U32 const tagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
    if (tag == tagMask) {
        U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
        U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
        ldmEntry_t entry;
        entry.offset = offset;
        entry.checksum = checksum;
        ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
    }
}

/** ZSTD_ldm_getRollingHash() :
 *  Get a 64-bit hash using the first len bytes from buf.
 *
 *  Giving bytes s = s_1, s_2, ... s_k, the hash is defined to be
 *  H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... + s_k*(a^0)
 *
 *  where the constant a is defined to be prime8bytes.
 *
 *  The implementation adds an offset to each byte, so
 *  H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ... */
static U64 ZSTD_ldm_getRollingHash(const BYTE* buf, U32 len)
{
    U64 ret = 0;
    U32 i;
    for (i = 0; i < len; i++) {
        ret *= prime8bytes;
        ret += buf[i] + LDM_HASH_CHAR_OFFSET;
    }
    return ret;
}

/** ZSTD_ldm_ipow() :
 *  Return base^exp. */
static U64 ZSTD_ldm_ipow(U64 base, U64 exp)
{
    U64 ret = 1;
    while (exp) {
        if (exp & 1) { ret *= base; }
        exp >>= 1;
        base *= base;
    }
    return ret;
}
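/* Illustration (editor's note, not upstream): ZSTD_ldm_ipow() is
 * exponentiation by squaring (O(log exp) multiplies), used to precompute
 * hashPower = a^(minMatchLength-1) mod 2^64. That constant is what makes the
 * hash "roll": sliding the window one byte to the right satisfies
 *
 *     H(s_2..s_{k+1}) = (H(s_1..s_k) - (s_1+OFF)*a^(k-1)) * a + (s_{k+1}+OFF)
 *
 * which is exactly what ZSTD_ldm_updateHash() below computes, so each step
 * costs two multiplies instead of rehashing all minMatchLength bytes. */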

U64 ZSTD_ldm_getHashPower(U32 minMatchLength) {
    DEBUGLOG(4, "ZSTD_ldm_getHashPower: mml=%u", minMatchLength);
    assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN);
    return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1);
}

/** ZSTD_ldm_updateHash() :
 *  Updates hash by removing toRemove and adding toAdd. */
static U64 ZSTD_ldm_updateHash(U64 hash, BYTE toRemove, BYTE toAdd, U64 hashPower)
{
    hash -= ((toRemove + LDM_HASH_CHAR_OFFSET) * hashPower);
    hash *= prime8bytes;
    hash += toAdd + LDM_HASH_CHAR_OFFSET;
    return hash;
}

/** ZSTD_ldm_countBackwardsMatch() :
 *  Returns the number of bytes that match backwards before pIn and pMatch.
 *
 *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
static size_t ZSTD_ldm_countBackwardsMatch(
            const BYTE* pIn, const BYTE* pAnchor,
            const BYTE* pMatch, const BYTE* pBase)
{
    size_t matchLength = 0;
    while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
        pIn--;
        pMatch--;
        matchLength++;
    }
    return matchLength;
}

/** ZSTD_ldm_fillFastTables() :
 *
 *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
 *  This is similar to ZSTD_loadDictionaryContent.
 *
 *  The tables for the other strategies are filled within their
 *  block compressors. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
                                      ZSTD_compressionParameters const* cParams,
                                      void const* end)
{
    const BYTE* const iend = (const BYTE*)end;

    switch(cParams->strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, cParams, iend);
        ms->nextToUpdate = (U32)(iend - ms->window.base);
        break;

    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, cParams, iend);
        ms->nextToUpdate = (U32)(iend - ms->window.base);
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
    case ZSTD_btlazy2:
    case ZSTD_btopt:
    case ZSTD_btultra:
        break;
    default:
        assert(0);   /* not possible : not a valid strategy id */
    }

    return 0;
}

/** ZSTD_ldm_fillLdmHashTable() :
 *
 *  Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
 *  lastHash is the rolling hash that corresponds to lastHashed.
 *
 *  Returns the rolling hash corresponding to position iend-1. */
static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
                                     U64 lastHash, const BYTE* lastHashed,
                                     const BYTE* iend, const BYTE* base,
                                     U32 hBits, ldmParams_t const ldmParams)
{
    U64 rollingHash = lastHash;
    const BYTE* cur = lastHashed + 1;

    while (cur < iend) {
        rollingHash = ZSTD_ldm_updateHash(rollingHash, cur[-1],
                                          cur[ldmParams.minMatchLength-1],
                                          state->hashPower);
        ZSTD_ldm_makeEntryAndInsertByTag(state,
                                         rollingHash, hBits,
                                         (U32)(cur - base), ldmParams);
        ++cur;
    }
    return rollingHash;
}


/** ZSTD_ldm_limitTableUpdate() :
 *
 *  Sets cctx->nextToUpdate to a position corresponding closer to anchor
 *  if it is far away
 *  (after a long match, only update tables a limited amount). */
static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
{
    U32 const current = (U32)(anchor - ms->window.base);
    if (current > ms->nextToUpdate + 1024) {
        ms->nextToUpdate =
            current - MIN(512, current - ms->nextToUpdate - 1024);
    }
}

static size_t ZSTD_ldm_generateSequences_internal(
        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    /* LDM parameters */
    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
    U32 const minMatchLength = params->minMatchLength;
    U64 const hashPower = ldmState->hashPower;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    U32 const ldmBucketSize = 1U << params->bucketSizeLog;
    U32 const hashEveryLog = params->hashEveryLog;
    U32 const ldmTagMask = (1U << params->hashEveryLog) - 1;
    /* Prefix and extDict parameters */
    U32 const dictLimit = ldmState->window.dictLimit;
    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
    BYTE const* const lowPrefixPtr = base + dictLimit;
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE);
    /* Input positions */
    BYTE const* anchor = istart;
    BYTE const* ip = istart;
    /* Rolling hash */
    BYTE const* lastHashed = NULL;
    U64 rollingHash = 0;

    while (ip <= ilimit) {
        size_t mLength;
        U32 const current = (U32)(ip - base);
        size_t forwardMatchLength = 0, backwardMatchLength = 0;
        ldmEntry_t* bestEntry = NULL;
        if (ip != istart) {
            rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
                                              lastHashed[minMatchLength],
                                              hashPower);
        } else {
            rollingHash = ZSTD_ldm_getRollingHash(ip, minMatchLength);
        }
        lastHashed = ip;

        /* Do not insert and do not look for a match */
        if (ZSTD_ldm_getTag(rollingHash, hBits, hashEveryLog) != ldmTagMask) {
            ip++;
            continue;
        }

        /* Get the best entry and compute the match lengths */
        {
            ldmEntry_t* const bucket =
                ZSTD_ldm_getBucket(ldmState,
                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),
                                   *params);
            ldmEntry_t* cur;
            size_t bestMatchLength = 0;
            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);

            for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }
                if (extDict) {
                    BYTE const* const curMatchBase =
                        cur->offset < dictLimit ? dictBase : base;
                    BYTE const* const pMatch = curMatchBase + cur->offset;
                    BYTE const* const matchEnd =
                        cur->offset < dictLimit ? dictEnd : iend;
                    BYTE const* const lowMatchPtr =
                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;

                    curForwardMatchLength = ZSTD_count_2segments(
                                                ip, pMatch, iend,
                                                matchEnd, lowPrefixPtr);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
                                                     lowMatchPtr);
                    curTotalMatchLength = curForwardMatchLength +
                                          curBackwardMatchLength;
                } else { /* !extDict */
                    BYTE const* const pMatch = base + cur->offset;
                    curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
                                                     lowPrefixPtr);
                    curTotalMatchLength = curForwardMatchLength +
                                          curBackwardMatchLength;
                }

                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }
        }

        /* No match found -- continue searching */
        if (bestEntry == NULL) {
            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
                                             hBits, current,
                                             *params);
            ip++;
            continue;
        }

        /* Match found */
        mLength = forwardMatchLength + backwardMatchLength;
        ip -= backwardMatchLength;

        {
            /* Store the sequence:
             * ip = current - backwardMatchLength
             * The match is at (bestEntry->offset - backwardMatchLength)
             */
            U32 const matchIndex = bestEntry->offset;
            U32 const offset = current - matchIndex;
            rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;

            /* Out of sequence storage */
            if (rawSeqStore->size == rawSeqStore->capacity)
                return ERROR(dstSize_tooSmall);
            seq->litLength = (U32)(ip - anchor);
            seq->matchLength = (U32)mLength;
            seq->offset = offset;
            rawSeqStore->size++;
        }

        /* Insert the current entry into the hash table */
        ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
                                         (U32)(lastHashed - base),
                                         *params);

        assert(ip + backwardMatchLength == lastHashed);

        /* Fill the hash table from lastHashed+1 to ip+mLength */
        /* Heuristic: don't need to fill the entire table at end of block */
        if (ip + mLength <= ilimit) {
            rollingHash = ZSTD_ldm_fillLdmHashTable(
                              ldmState, rollingHash, lastHashed,
                              ip + mLength, base, hBits, *params);
            lastHashed = ip + mLength - 1;
        }
        ip += mLength;
        anchor = ip;
    }
    return iend - anchor;
}

/*! ZSTD_ldm_reduceTable() :
 *  reduce table indexes by `reducerValue` */
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
                                 U32 const reducerValue)
{
    U32 u;
    for (u = 0; u < size; u++) {
        if (table[u].offset < reducerValue) table[u].offset = 0;
        else table[u].offset -= reducerValue;
    }
}
|
||||
|
||||
size_t ZSTD_ldm_generateSequences(
|
||||
ldmState_t* ldmState, rawSeqStore_t* sequences,
|
||||
ldmParams_t const* params, void const* src, size_t srcSize)
|
||||
{
|
||||
U32 const maxDist = 1U << params->windowLog;
|
||||
BYTE const* const istart = (BYTE const*)src;
|
||||
BYTE const* const iend = istart + srcSize;
|
||||
size_t const kMaxChunkSize = 1 << 20;
|
||||
size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
|
||||
size_t chunk;
|
||||
size_t leftoverSize = 0;
|
||||
|
||||
assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
|
||||
/* Check that ZSTD_window_update() has been called for this chunk prior
|
||||
* to passing it to this function.
|
||||
*/
|
||||
assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
|
||||
/* The input could be very large (in zstdmt), so it must be broken up into
|
||||
* chunks to enforce the maximmum distance and handle overflow correction.
|
||||
*/
|
||||
assert(sequences->pos <= sequences->size);
|
||||
assert(sequences->size <= sequences->capacity);
|
||||
for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
|
||||
BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
|
||||
size_t const remaining = (size_t)(iend - chunkStart);
|
||||
BYTE const *const chunkEnd =
|
||||
(remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
|
||||
size_t const chunkSize = chunkEnd - chunkStart;
|
||||
size_t newLeftoverSize;
|
||||
size_t const prevSize = sequences->size;
|
||||
|
||||
assert(chunkStart < iend);
|
||||
/* 1. Perform overflow correction if necessary. */
|
||||
if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
|
||||
U32 const ldmHSize = 1U << params->hashLog;
|
||||
U32 const correction = ZSTD_window_correctOverflow(
|
||||
&ldmState->window, /* cycleLog */ 0, maxDist, src);
|
||||
ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
|
||||
}
|
||||
/* 2. We enforce the maximum offset allowed.
|
||||
*
|
||||
* kMaxChunkSize should be small enough that we don't lose too much of
|
||||
* the window through early invalidation.
|
||||
* TODO: * Test the chunk size.
|
||||
* * Try invalidation after the sequence generation and test the
|
||||
* the offset against maxDist directly.
|
||||
*/
|
||||
ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL);
|
||||
/* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
|
||||
newLeftoverSize = ZSTD_ldm_generateSequences_internal(
|
||||
ldmState, sequences, params, chunkStart, chunkSize);
|
||||
if (ZSTD_isError(newLeftoverSize))
|
||||
return newLeftoverSize;
|
||||
/* 4. We add the leftover literals from previous iterations to the first
|
||||
* newly generated sequence, or add the `newLeftoverSize` if none are
|
||||
* generated.
|
||||
*/
|
||||
/* Prepend the leftover literals from the last call */
|
||||
if (prevSize < sequences->size) {
|
||||
sequences->seq[prevSize].litLength += (U32)leftoverSize;
|
||||
leftoverSize = newLeftoverSize;
|
||||
} else {
|
||||
assert(newLeftoverSize == chunkSize);
|
||||
leftoverSize += chunkSize;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
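
/* Editor's note: illustrative sketch, not part of the vendored file. The
 * `nbChunks` expression above is a plain ceiling division, so every byte of
 * input is covered even when srcSize is not a multiple of kMaxChunkSize.
 * For example, with kMaxChunkSize = 1 << 20 (1 MiB), a 2.5 MiB input
 * (2621440 bytes) yields 2621440/1048576 = 2, plus 1 for the nonzero
 * remainder: 3 chunks (1 MiB + 1 MiB + 0.5 MiB). */
#if 0
static size_t ldm_nbChunks_example(size_t srcSize)
{
    size_t const kMaxChunkSize = 1 << 20;   /* same constant as above */
    return (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
}
#endif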

void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
        if (srcSize <= seq->litLength) {
            /* Skip past srcSize literals */
            seq->litLength -= (U32)srcSize;
            return;
        }
        srcSize -= seq->litLength;
        seq->litLength = 0;
        if (srcSize < seq->matchLength) {
            /* Skip past the first srcSize of the match */
            seq->matchLength -= (U32)srcSize;
            if (seq->matchLength < minMatch) {
                /* The match is too short, omit it */
                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
                    seq[1].litLength += seq[0].matchLength;
                }
                rawSeqStore->pos++;
            }
            return;
        }
        srcSize -= seq->matchLength;
        seq->matchLength = 0;
        rawSeqStore->pos++;
    }
}
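
/* Editor's note: a worked example, not part of the vendored file. Skipping
 * srcSize = 5 bytes over a stored sequence {litLength = 3, matchLength = 4}
 * with minMatch = 3 first consumes the 3 literals, then trims the match to
 * 4 - 2 = 2 bytes; since 2 < minMatch, the 2-byte remainder is folded into
 * the next sequence's litLength and the trimmed sequence is dropped, exactly
 * as coded above. */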

/**
 * If the sequence length is longer than remaining then the sequence is split
 * between this block and the next.
 *
 * Returns the current sequence to handle, or if the rest of the block should
 * be literals, it returns a sequence with offset == 0.
 */
static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
                                 U32 const remaining, U32 const minMatch)
{
    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
    assert(sequence.offset > 0);
    /* Likely: No partial sequence */
    if (remaining >= sequence.litLength + sequence.matchLength) {
        rawSeqStore->pos++;
        return sequence;
    }
    /* Cut the sequence short (offset == 0 ==> rest is literals). */
    if (remaining <= sequence.litLength) {
        sequence.offset = 0;
    } else if (remaining < sequence.litLength + sequence.matchLength) {
        sequence.matchLength = remaining - sequence.litLength;
        if (sequence.matchLength < minMatch) {
            sequence.offset = 0;
        }
    }
    /* Skip past `remaining` bytes for the future sequences. */
    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
    return sequence;
}
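
/* Editor's note: a worked example, not part of the vendored file. With
 * remaining = 10 and a stored sequence {litLength = 4, matchLength = 16},
 * the match is cut to 10 - 4 = 6 bytes for this block (kept, since
 * 6 >= minMatch for the usual minMatch of 3 or 4), and
 * ZSTD_ldm_skipSequences() trims the stored copy to {litLength = 0,
 * matchLength = 10} so the next block resumes at the right position. */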

size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
    ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
    int const extDict)
{
    unsigned const minMatch = cParams->searchLength;
    ZSTD_blockCompressor const blockCompressor =
        ZSTD_selectBlockCompressor(cParams->strategy, extDict);
    BYTE const* const base = ms->window.base;
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    /* Input positions */
    BYTE const* ip = istart;

    assert(rawSeqStore->pos <= rawSeqStore->size);
    assert(rawSeqStore->size <= rawSeqStore->capacity);
    /* Loop through each sequence and apply the block compressor to the lits */
    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
        /* maybeSplitSequence updates rawSeqStore->pos */
        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
                                                   (U32)(iend - ip), minMatch);
        int i;
        /* End signal */
        if (sequence.offset == 0)
            break;

        assert(sequence.offset <= (1U << cParams->windowLog));
        assert(ip + sequence.litLength + sequence.matchLength <= iend);

        /* Fill tables for block compressor */
        ZSTD_ldm_limitTableUpdate(ms, ip);
        ZSTD_ldm_fillFastTables(ms, cParams, ip);
        /* Run the block compressor */
        {
            size_t const newLitLength =
                blockCompressor(ms, seqStore, rep, cParams, ip,
                                sequence.litLength);
            ip += sequence.litLength;
            ms->nextToUpdate = (U32)(ip - base);
            /* Update the repcodes */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                rep[i] = rep[i-1];
            rep[0] = sequence.offset;
            /* Store the sequence */
            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength,
                          sequence.offset + ZSTD_REP_MOVE,
                          sequence.matchLength - MINMATCH);
            ip += sequence.matchLength;
        }
    }
    /* Fill the tables for the block compressor */
    ZSTD_ldm_limitTableUpdate(ms, ip);
    ZSTD_ldm_fillFastTables(ms, cParams, ip);
    /* Compress the last literals */
    {
        size_t const lastLiterals = blockCompressor(ms, seqStore, rep, cParams,
                                                    ip, iend - ip);
        ms->nextToUpdate = (U32)(iend - base);
        return lastLiterals;
    }
}
111
third/github.com/DataDog/zstd/zstd_ldm.h
Normal file
@@ -0,0 +1,111 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 */

#ifndef ZSTD_LDM_H
#define ZSTD_LDM_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "zstd_compress_internal.h"   /* ldmParams_t, U32 */
#include "zstd.h"   /* ZSTD_CCtx, size_t */

/*-*************************************
*  Long distance matching
***************************************/

#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_DEFAULTMAX

/**
 * ZSTD_ldm_generateSequences():
 *
 * Generates the sequences using the long distance match finder.
 * Generates long range matching sequences in `sequences`, which parse a prefix
 * of the source. `sequences` must be large enough to store every sequence,
 * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
 * @returns 0 or an error code.
 *
 * NOTE: The user must have called ZSTD_window_update() for all of the input
 * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
 * NOTE: This function returns an error if it runs out of space to store
 *       sequences.
 */
size_t ZSTD_ldm_generateSequences(
            ldmState_t* ldms, rawSeqStore_t* sequences,
            ldmParams_t const* params, void const* src, size_t srcSize);

/**
 * ZSTD_ldm_blockCompress():
 *
 * Compresses a block using the predefined sequences, along with a secondary
 * block compressor. The literals section of every sequence is passed to the
 * secondary block compressor, and those sequences are interspersed with the
 * predefined sequences. Returns the length of the last literals.
 * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
 * `rawSeqStore.seq` may also be updated to split the last sequence between two
 * blocks.
 * @return The length of the last literals.
 *
 * NOTE: The source must be at most the maximum block size, but the predefined
 * sequences can be any size, and may be longer than the block. In the case that
 * they are longer than the block, the last sequences may need to be split into
 * two. We handle that case correctly, and update `rawSeqStore` appropriately.
 * NOTE: This function does not return any errors.
 */
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
            ZSTD_compressionParameters const* cParams,
            void const* src, size_t srcSize,
            int const extDict);
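
/* Editor's note: the sketch below is illustrative and not part of the vendored
 * header. It stitches together the call order implied by the NOTEs above:
 * update the window, generate LDM sequences, then compress one block. The
 * exact signature of ZSTD_window_update() is assumed from
 * zstd_compress_internal.h, and `srcSize` is assumed to be at most one
 * block, as ZSTD_ldm_blockCompress() requires. */
#if 0
static size_t ldm_one_block_sketch(ldmState_t* ldms, ZSTD_matchState_t* ms,
                                   seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
                                   ldmParams_t const* params,
                                   ZSTD_compressionParameters const* cParams,
                                   rawSeqStore_t* seqs,  /* sized via ZSTD_ldm_getMaxNbSeq() */
                                   void const* src, size_t srcSize)
{
    ZSTD_window_update(&ldms->window, src, srcSize);    /* see NOTE above (signature assumed) */
    {   size_t const err = ZSTD_ldm_generateSequences(ldms, seqs, params, src, srcSize);
        if (ZSTD_isError(err)) return err;              /* ran out of sequence space */
    }
    /* Returns the length of the block's trailing literals. */
    return ZSTD_ldm_blockCompress(seqs, ms, seqStore, rep, cParams,
                                  src, srcSize, 0 /* extDict */);
}
#endif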

/**
 * ZSTD_ldm_skipSequences():
 *
 * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
 * Avoids emitting matches less than `minMatch` bytes.
 * Must be called for data which is not passed to ZSTD_ldm_blockCompress().
 */
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
    U32 const minMatch);


/** ZSTD_ldm_getTableSize() :
 *  Estimate the space needed for long distance matching tables or 0 if LDM is
 *  disabled.
 */
size_t ZSTD_ldm_getTableSize(ldmParams_t params);

/** ZSTD_ldm_getMaxNbSeq() :
 *  Return an upper bound on the number of sequences that can be produced by
 *  the long distance matcher, or 0 if LDM is disabled.
 */
size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);

/** ZSTD_ldm_getHashPower() :
 *  Return prime8bytes^(minMatchLength-1) */
U64 ZSTD_ldm_getHashPower(U32 minMatchLength);

/** ZSTD_ldm_adjustParameters() :
 *  If the params->hashEveryLog is not set, set it to its default value based on
 *  windowLog and params->hashLog.
 *
 *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
 *  params->hashLog if it is not).
 *
 *  Ensures that the minMatchLength >= targetLength during optimal parsing.
 */
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_LDM_H */
381
third/github.com/DataDog/zstd/zstd_legacy.h
Normal file
@@ -0,0 +1,381 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_LEGACY_H
#define ZSTD_LEGACY_H

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include "mem.h"            /* MEM_STATIC */
#include "error_private.h"  /* ERROR */
#include "zstd.h"           /* ZSTD_inBuffer, ZSTD_outBuffer */

#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
# undef ZSTD_LEGACY_SUPPORT
# define ZSTD_LEGACY_SUPPORT 8
#endif

#if (ZSTD_LEGACY_SUPPORT <= 1)
# include "zstd_v01.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
# include "zstd_v02.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
# include "zstd_v03.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
# include "zstd_v04.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
# include "zstd_v05.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
# include "zstd_v06.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
# include "zstd_v07.h"
#endif

/** ZSTD_isLegacy() :
    @return : > 0 if supported by legacy decoder. 0 otherwise.
              return value is the version.
*/
MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize)
{
    U32 magicNumberLE;
    if (srcSize<4) return 0;
    magicNumberLE = MEM_readLE32(src);
    switch(magicNumberLE)
    {
#if (ZSTD_LEGACY_SUPPORT <= 1)
        case ZSTDv01_magicNumberLE:return 1;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
        case ZSTDv02_magicNumber : return 2;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
        case ZSTDv03_magicNumber : return 3;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
        case ZSTDv04_magicNumber : return 4;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
        case ZSTDv05_MAGICNUMBER : return 5;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
        case ZSTDv06_MAGICNUMBER : return 6;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case ZSTDv07_MAGICNUMBER : return 7;
#endif
        default : return 0;
    }
}
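
/* Editor's note: illustrative only, not part of the vendored header. A caller
 * can route a frame to the legacy decoder based on this probe:
 *     unsigned const v = ZSTD_isLegacy(buf, bufSize);
 *     if (v) { } // legacy zstd v0.<v> frame; dispatch to the v0.<v> decoder
 * A return of 0 means "not a legacy frame" (possibly a current-format frame). */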

MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize)
{
    U32 const version = ZSTD_isLegacy(src, srcSize);
    if (version < 5) return 0;  /* no decompressed size in frame header, or not a legacy format */
#if (ZSTD_LEGACY_SUPPORT <= 5)
    if (version==5) {
        ZSTDv05_parameters fParams;
        size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize);
        if (frResult != 0) return 0;
        return fParams.srcSize;
    }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
    if (version==6) {
        ZSTDv06_frameParams fParams;
        size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize);
        if (frResult != 0) return 0;
        return fParams.frameContentSize;
    }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
    if (version==7) {
        ZSTDv07_frameParams fParams;
        size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize);
        if (frResult != 0) return 0;
        return fParams.frameContentSize;
    }
#endif
    return 0;   /* should not be possible */
}

MEM_STATIC size_t ZSTD_decompressLegacy(
                     void* dst, size_t dstCapacity,
               const void* src, size_t compressedSize,
               const void* dict,size_t dictSize)
{
    U32 const version = ZSTD_isLegacy(src, compressedSize);
    (void)dst; (void)dstCapacity; (void)dict; (void)dictSize;  /* unused when ZSTD_LEGACY_SUPPORT >= 8 */
    switch(version)
    {
#if (ZSTD_LEGACY_SUPPORT <= 1)
        case 1 :
            return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
        case 2 :
            return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
        case 3 :
            return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
        case 4 :
            return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
        case 5 :
            {   size_t result;
                ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx();
                if (zd==NULL) return ERROR(memory_allocation);
                result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
                ZSTDv05_freeDCtx(zd);
                return result;
            }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
        case 6 :
            {   size_t result;
                ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx();
                if (zd==NULL) return ERROR(memory_allocation);
                result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
                ZSTDv06_freeDCtx(zd);
                return result;
            }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case 7 :
            {   size_t result;
                ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx();
                if (zd==NULL) return ERROR(memory_allocation);
                result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
                ZSTDv07_freeDCtx(zd);
                return result;
            }
#endif
        default :
            return ERROR(prefix_unknown);
    }
}
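
/* Editor's note: illustrative only, not part of the vendored header. A typical
 * call, with no dictionary:
 *     size_t const r = ZSTD_decompressLegacy(dst, dstCap, src, srcSize, NULL, 0);
 *     if (ZSTD_isError(r)) { } // handle error
 * On success `r` is the decompressed size, mirroring ZSTD_decompress(). */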

MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src,
                                                     size_t compressedSize)
{
    U32 const version = ZSTD_isLegacy(src, compressedSize);
    switch(version)
    {
#if (ZSTD_LEGACY_SUPPORT <= 1)
        case 1 :
            return ZSTDv01_findFrameCompressedSize(src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
        case 2 :
            return ZSTDv02_findFrameCompressedSize(src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
        case 3 :
            return ZSTDv03_findFrameCompressedSize(src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
        case 4 :
            return ZSTDv04_findFrameCompressedSize(src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
        case 5 :
            return ZSTDv05_findFrameCompressedSize(src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
        case 6 :
            return ZSTDv06_findFrameCompressedSize(src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case 7 :
            return ZSTDv07_findFrameCompressedSize(src, compressedSize);
#endif
        default :
            return ERROR(prefix_unknown);
    }
}

MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
{
    switch(version)
    {
        default :
        case 1 :
        case 2 :
        case 3 :
            (void)legacyContext;
            return ERROR(version_unsupported);
#if (ZSTD_LEGACY_SUPPORT <= 4)
        case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
        case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
        case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext);
#endif
    }
}

MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion,
                                        const void* dict, size_t dictSize)
{
    DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion);
    if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion);
    switch(newVersion)
    {
        default :
        case 1 :
        case 2 :
        case 3 :
            (void)dict; (void)dictSize;
            return 0;
#if (ZSTD_LEGACY_SUPPORT <= 4)
        case 4 :
        {
            ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext;
            if (dctx==NULL) return ERROR(memory_allocation);
            ZBUFFv04_decompressInit(dctx);
            ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize);
            *legacyContext = dctx;
            return 0;
        }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
        case 5 :
        {
            ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext;
            if (dctx==NULL) return ERROR(memory_allocation);
            ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize);
            *legacyContext = dctx;
            return 0;
        }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
        case 6 :
        {
            ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext;
            if (dctx==NULL) return ERROR(memory_allocation);
            ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize);
            *legacyContext = dctx;
            return 0;
        }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case 7 :
        {
            ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext;
            if (dctx==NULL) return ERROR(memory_allocation);
            ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize);
            *legacyContext = dctx;
            return 0;
        }
#endif
    }
}


MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
                                              ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version);
    switch(version)
    {
        default :
        case 1 :
        case 2 :
        case 3 :
            (void)legacyContext; (void)output; (void)input;
            return ERROR(version_unsupported);
#if (ZSTD_LEGACY_SUPPORT <= 4)
        case 4 :
            {
                ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext;
                const void* src = (const char*)input->src + input->pos;
                size_t readSize = input->size - input->pos;
                void* dst = (char*)output->dst + output->pos;
                size_t decodedSize = output->size - output->pos;
                size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
                output->pos += decodedSize;
                input->pos += readSize;
                return hintSize;
            }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
        case 5 :
            {
                ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext;
                const void* src = (const char*)input->src + input->pos;
                size_t readSize = input->size - input->pos;
                void* dst = (char*)output->dst + output->pos;
                size_t decodedSize = output->size - output->pos;
                size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
                output->pos += decodedSize;
                input->pos += readSize;
                return hintSize;
            }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
        case 6 :
            {
                ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext;
                const void* src = (const char*)input->src + input->pos;
                size_t readSize = input->size - input->pos;
                void* dst = (char*)output->dst + output->pos;
                size_t decodedSize = output->size - output->pos;
                size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
                output->pos += decodedSize;
                input->pos += readSize;
                return hintSize;
            }
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
        case 7 :
            {
                ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext;
                const void* src = (const char*)input->src + input->pos;
                size_t readSize = input->size - input->pos;
                void* dst = (char*)output->dst + output->pos;
                size_t decodedSize = output->size - output->pos;
                size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
                output->pos += decodedSize;
                input->pos += readSize;
                return hintSize;
            }
#endif
    }
}


#if defined (__cplusplus)
}
#endif

#endif   /* ZSTD_LEGACY_H */
923
third/github.com/DataDog/zstd/zstd_opt.c
Normal file
@@ -0,0 +1,923 @@
/*
 * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_opt.h"


#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats. Also used for matchSum (?) */
#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
#define ZSTD_MAX_PRICE     (1<<30)


/*-*************************************
*  Price functions for optimal parser
***************************************/
static void ZSTD_setLog2Prices(optState_t* optPtr)
{
    optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
    optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
    optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
    optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
}
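
/* Editor's note (not in the upstream file): these log2 totals turn frequency
 * counts into approximate bit costs. A symbol seen f times out of s total
 * costs roughly -log2(f/s) = log2(s) - log2(f) bits; e.g. f = 16, s = 256
 * gives 8 - 4 = 4 bits. The +1 in each expression above keeps the argument
 * of ZSTD_highbit32() nonzero. */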

static void ZSTD_rescaleFreqs(optState_t* const optPtr,
                              const BYTE* const src, size_t const srcSize)
{
    optPtr->staticPrices = 0;

    if (optPtr->litLengthSum == 0) {  /* first init */
        unsigned u;
        if (srcSize <= 1024) optPtr->staticPrices = 1;

        assert(optPtr->litFreq!=NULL);
        for (u=0; u<=MaxLit; u++)
            optPtr->litFreq[u] = 0;
        for (u=0; u<srcSize; u++)
            optPtr->litFreq[src[u]]++;
        optPtr->litSum = 0;
        for (u=0; u<=MaxLit; u++) {
            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> ZSTD_FREQ_DIV);
            optPtr->litSum += optPtr->litFreq[u];
        }

        for (u=0; u<=MaxLL; u++)
            optPtr->litLengthFreq[u] = 1;
        optPtr->litLengthSum = MaxLL+1;
        for (u=0; u<=MaxML; u++)
            optPtr->matchLengthFreq[u] = 1;
        optPtr->matchLengthSum = MaxML+1;
        for (u=0; u<=MaxOff; u++)
            optPtr->offCodeFreq[u] = 1;
        optPtr->offCodeSum = (MaxOff+1);

    } else {
        unsigned u;

        optPtr->litSum = 0;
        for (u=0; u<=MaxLit; u++) {
            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> (ZSTD_FREQ_DIV+1));
            optPtr->litSum += optPtr->litFreq[u];
        }
        optPtr->litLengthSum = 0;
        for (u=0; u<=MaxLL; u++) {
            optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
            optPtr->litLengthSum += optPtr->litLengthFreq[u];
        }
        optPtr->matchLengthSum = 0;
        for (u=0; u<=MaxML; u++) {
            optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
            optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
        }
        optPtr->offCodeSum = 0;
        for (u=0; u<=MaxOff; u++) {
            optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
            optPtr->offCodeSum += optPtr->offCodeFreq[u];
        }
    }

    ZSTD_setLog2Prices(optPtr);
}

/* ZSTD_rawLiteralsCost() :
 * cost of literals (only) in given segment (whose length can be zero)
 * does not include cost of literalLength symbol */
static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
                                const optState_t* const optPtr)
{
    if (optPtr->staticPrices) return (litLength*6);  /* 6 bit per literal - no statistic used */
    if (litLength == 0) return 0;

    /* literals */
    {   U32 u;
        U32 cost = litLength * optPtr->log2litSum;
        for (u=0; u < litLength; u++)
            cost -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);
        return cost;
    }
}

/* ZSTD_litLengthPrice() :
 * cost of literalLength symbol */
static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr)
{
    if (optPtr->staticPrices) return ZSTD_highbit32((U32)litLength+1);

    /* literal Length */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        U32 const price = LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
        return price;
    }
}

/* ZSTD_fullLiteralsCost() :
 * cost of the literal part of a sequence,
 * including literals themselves, and literalLength symbol */
static U32 ZSTD_fullLiteralsCost(const BYTE* const literals, U32 const litLength,
                                 const optState_t* const optPtr)
{
    return ZSTD_rawLiteralsCost(literals, litLength, optPtr)
         + ZSTD_litLengthPrice(litLength, optPtr);
}

/* ZSTD_litLengthContribution() :
 * @return ( cost(litlength) - cost(0) )
 * this value can then be added to rawLiteralsCost()
 * to provide a cost which is directly comparable to a match ending at same position */
static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr)
{
    if (optPtr->staticPrices) return ZSTD_highbit32(litLength+1);

    /* literal Length */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        int const contribution = LL_bits[llCode]
                        + ZSTD_highbit32(optPtr->litLengthFreq[0]+1)
                        - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
#if 1
        return contribution;
#else
        return MAX(0, contribution); /* sometimes better, sometimes not ... */
#endif
    }
}

/* ZSTD_literalsContribution() :
 * creates a fake cost for the literals part of a sequence
 * which can be compared to the ending cost of a match
 * should a new match start at this position */
static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,
                                     const optState_t* const optPtr)
{
    int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr)
                           + ZSTD_litLengthContribution(litLength, optPtr);
    return contribution;
}

/* ZSTD_getMatchPrice() :
 * Provides the cost of the match part (offset + matchLength) of a sequence
 * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
 * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
FORCE_INLINE_TEMPLATE U32 ZSTD_getMatchPrice(
                                    U32 const offset, U32 const matchLength,
                                    const optState_t* const optPtr,
                                    int const optLevel)
{
    U32 price;
    U32 const offCode = ZSTD_highbit32(offset+1);
    U32 const mlBase = matchLength - MINMATCH;
    assert(matchLength >= MINMATCH);

    if (optPtr->staticPrices)  /* fixed scheme, do not use statistics */
        return ZSTD_highbit32((U32)mlBase+1) + 16 + offCode;

    price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1);
    if ((optLevel<2) /*static*/ && offCode >= 20) price += (offCode-19)*2;   /* handicap for long distance offsets, favor decompression speed */

    /* match Length */
    {   U32 const mlCode = ZSTD_MLcode(mlBase);
        price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1);
    }

    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
    return price;
}

static void ZSTD_updateStats(optState_t* const optPtr,
                             U32 litLength, const BYTE* literals,
                             U32 offsetCode, U32 matchLength)
{
    /* literals */
    {   U32 u;
        for (u=0; u < litLength; u++)
            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
    }

    /* literal Length */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        optPtr->litLengthFreq[llCode]++;
        optPtr->litLengthSum++;
    }

    /* match offset code (0-2=>repCode; 3+=>offset+2) */
    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
        assert(offCode <= MaxOff);
        optPtr->offCodeFreq[offCode]++;
        optPtr->offCodeSum++;
    }

    /* match Length */
    {   U32 const mlBase = matchLength - MINMATCH;
        U32 const mlCode = ZSTD_MLcode(mlBase);
        optPtr->matchLengthFreq[mlCode]++;
        optPtr->matchLengthSum++;
    }
}


/* ZSTD_readMINMATCH() :
 * function safe only for comparisons
 * assumption : memPtr must be at least 4 bytes before end of buffer */
MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
{
    switch (length)
    {
    default :
    case 4 : return MEM_read32(memPtr);
    case 3 : if (MEM_isLittleEndian())
                return MEM_read32(memPtr)<<8;
             else
                return MEM_read32(memPtr)>>8;
    }
}
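
/* Editor's note (not in the upstream file): for length == 3, the shift
 * discards the byte read from memPtr[3] (the top byte of a little-endian
 * read32, the bottom byte of a big-endian one), so two positions compare
 * equal exactly when their first 3 bytes match. */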

/* Update hashTable3 up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, const BYTE* const ip)
{
    U32* const hashTable3 = ms->hashTable3;
    U32 const hashLog3 = ms->hashLog3;
    const BYTE* const base = ms->window.base;
    U32 idx = ms->nextToUpdate3;
    U32 const target = ms->nextToUpdate3 = (U32)(ip - base);
    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
    assert(hashLog3 > 0);

    while(idx < target) {
        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
        idx++;
    }

    return hashTable3[hash3];
}


/*-*************************************
*  Binary Tree search
***************************************/
/** ZSTD_insertBt1() : add one or multiple positions to tree.
 *  ip : assumed <= iend-8 .
 * @return : nb of positions added */
static U32 ZSTD_insertBt1(
                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                const BYTE* const ip, const BYTE* const iend,
                U32 const mls, U32 const extDict)
{
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32 matchIndex = hashTable[h];
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    const U32 current = (U32)(ip-base);
    const U32 btLow = btMask >= current ? 0 : current - btMask;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = ms->window.lowLimit;
    U32 matchEndIdx = current+8+1;
    size_t bestLength = 8;
    U32 nbCompares = 1U << cParams->searchLog;
#ifdef ZSTD_C_PREDICT
    U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
    U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
    predictedSmall += (predictedSmall>0);
    predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */

    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);

    assert(ip <= iend-8);   /* required for h calculation */
    hashTable[h] = current;   /* Update Hash Table */

    while (nbCompares-- && (matchIndex > windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < current);

#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
        if (matchIndex == predictedSmall) {
            /* no need to check length, result known */
            *smallerPtr = matchIndex;
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
            continue;
        }
        if (matchIndex == predictedLarge) {
            *largerPtr = matchIndex;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
            continue;
        }
#endif

        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if extDict is incorrectly set to 0 */
            match = base + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        if (matchLength > bestLength) {
            bestLength = matchLength;
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
        }

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
        }

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
    if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
    assert(matchEndIdx > current + 8);
    return matchEndIdx - (current + 8);
}

FORCE_INLINE_TEMPLATE
void ZSTD_updateTree_internal(
                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                const BYTE* const ip, const BYTE* const iend,
                const U32 mls, const U32 extDict)
{
    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;
    DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (extDict:%u)",
                idx, target, extDict);

    while(idx < target)
        idx += ZSTD_insertBt1(ms, cParams, base+idx, iend, mls, extDict);
    ms->nextToUpdate = target;
}

void ZSTD_updateTree(
                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                const BYTE* ip, const BYTE* iend)
{
    ZSTD_updateTree_internal(ms, cParams, ip, iend, cParams->searchLength, 0 /*extDict*/);
}

FORCE_INLINE_TEMPLATE
U32 ZSTD_insertBtAndGetAllMatches (
                    ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                    const BYTE* const ip, const BYTE* const iLimit, int const extDict,
                    U32 rep[ZSTD_REP_NUM], U32 const ll0,
                    ZSTD_match_t* matches, const U32 lengthToBeat, U32 const mls /* template */)
{
    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
    const BYTE* const base = ms->window.base;
    U32 const current = (U32)(ip-base);
    U32 const hashLog = cParams->hashLog;
    U32 const minMatch = (mls==3) ? 3 : 4;
    U32* const hashTable = ms->hashTable;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32 matchIndex  = hashTable[h];
    U32* const bt   = ms->chainTable;
    U32 const btLog = cParams->chainLog - 1;
    U32 const btMask= (1U << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const dictBase = ms->window.dictBase;
    U32 const dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    U32 const btLow = btMask >= current ? 0 : current - btMask;
    U32 const windowLow = ms->window.lowLimit;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = bt + 2*(current&btMask) + 1;
    U32 matchEndIdx = current+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
    U32 dummy32;   /* to be nullified at the end */
    U32 mnum = 0;
    U32 nbCompares = 1U << cParams->searchLog;

    size_t bestLength = lengthToBeat-1;
    DEBUGLOG(7, "ZSTD_insertBtAndGetAllMatches");

    /* check repCode */
    {   U32 const lastR = ZSTD_REP_NUM + ll0;
        U32 repCode;
        for (repCode = ll0; repCode < lastR; repCode++) {
            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            U32 const repIndex = current - repOffset;
            U32 repLen = 0;
            assert(current >= dictLimit);
            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) {  /* equivalent to `current > repIndex >= dictLimit` */
                if (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) {
                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
                }
            } else {  /* repIndex < dictLimit || repIndex >= current */
                const BYTE* const repMatch = dictBase + repIndex;
                assert(current >= windowLow);
                if ( extDict /* this case only valid in extDict mode */
                  && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow)  /* equivalent to `current > repIndex >= windowLow` */
                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
            }   }
            /* save longer solution */
            if (repLen > bestLength) {
                DEBUGLOG(8, "found rep-match %u of length %u",
                            repCode - ll0, (U32)repLen);
                bestLength = repLen;
                matches[mnum].off = repCode - ll0;
                matches[mnum].len = (U32)repLen;
                mnum++;
                if ( (repLen > sufficient_len)
                   | (ip+repLen == iLimit) ) {  /* best possible */
                    return mnum;
    }   }   }   }

    /* HC3 match finder */
    if ((mls == 3) /*static*/ && (bestLength < mls)) {
        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, ip);
        if ((matchIndex3 > windowLow)
          & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
            size_t mlen;
            if ((!extDict) /*static*/ || (matchIndex3 >= dictLimit)) {
                const BYTE* const match = base + matchIndex3;
                mlen = ZSTD_count(ip, match, iLimit);
            } else {
                const BYTE* const match = dictBase + matchIndex3;
                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
            }

            /* save best solution */
            if (mlen >= mls /* == 3 > bestLength */) {
                DEBUGLOG(8, "found small match with hlog3, of length %u",
                            (U32)mlen);
                bestLength = mlen;
                assert(current > matchIndex3);
                assert(mnum==0);  /* no prior solution */
                matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE;
                matches[0].len = (U32)mlen;
                mnum = 1;
                if ( (mlen > sufficient_len) |
                     (ip+mlen == iLimit) ) {  /* best possible length */
                    ms->nextToUpdate = current+1;  /* skip insertion */
                    return 1;
    }   }   }   }

    hashTable[h] = current;   /* Update Hash Table */

    while (nbCompares-- && (matchIndex > windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        const BYTE* match;
        assert(current > matchIndex);

        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
            match = base + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* prepare for match[matchLength] */
        }

        if (matchLength > bestLength) {
            DEBUGLOG(8, "found match of length %u at distance %u",
                        (U32)matchLength, current - matchIndex);
            assert(matchEndIdx > matchIndex);
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
            bestLength = matchLength;
            matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
            matches[mnum].len = (U32)matchLength;
            mnum++;
            if (matchLength > ZSTD_OPT_NUM) break;
            if (ip+matchLength == iLimit) {  /* equal : no way to know if inf or sup */
                break;   /* drop, to preserve bt consistency (miss a little bit of compression) */
            }
        }

        if (match[matchLength] < ip[matchLength]) {
            /* match smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
        } else {
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;

    assert(matchEndIdx > current+8);
    ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
    return mnum;
}


FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iHighLimit, int const extDict,
                        U32 rep[ZSTD_REP_NUM], U32 const ll0,
                        ZSTD_match_t* matches, U32 const lengthToBeat)
{
    U32 const matchLengthSearch = cParams->searchLength;
    DEBUGLOG(7, "ZSTD_BtGetAllMatches");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateTree_internal(ms, cParams, ip, iHighLimit, matchLengthSearch, extDict);
    switch(matchLengthSearch)
    {
    case 3 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 3);
    default :
    case 4 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 4);
    case 5 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 5);
    case 7 :
    case 6 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 6);
    }
}


/*-*******************************
*  Optimal parser
*********************************/
typedef struct repcodes_s {
    U32 rep[3];
} repcodes_t;

repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
{
    repcodes_t newReps;
    if (offset >= ZSTD_REP_NUM) {  /* full offset */
        newReps.rep[2] = rep[1];
        newReps.rep[1] = rep[0];
        newReps.rep[0] = offset - ZSTD_REP_MOVE;
    } else {   /* repcode */
        U32 const repCode = offset + ll0;
        if (repCode > 0) {  /* note : if repCode==0, no change */
            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            newReps.rep[1] = rep[0];
            newReps.rep[0] = currentOffset;
        } else {   /* repCode == 0 */
            memcpy(&newReps, rep, sizeof(newReps));
        }
    }
    return newReps;
}
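
/* Editor's note: a worked example, not part of the upstream file. Starting
 * from the history rep = {8, 16, 24}:
 *  - a full offset (offset >= ZSTD_REP_NUM, say offset = ZSTD_REP_MOVE + 100)
 *    shifts the history to {100, 8, 16};
 *  - offset == 1 with ll0 == 0 selects rep[1] and swaps it to the front,
 *    giving {16, 8, 24};
 *  - offset == 0 with ll0 == 0 selects rep[0], so {8, 16, 24} is unchanged. */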


typedef struct {
    const BYTE* anchor;
    U32 litlen;
    U32 rawLitCost;
} cachedLiteralPrice_t;

static U32 ZSTD_rawLiteralsCost_cached(
                            cachedLiteralPrice_t* const cachedLitPrice,
                            const BYTE* const anchor, U32 const litlen,
                            const optState_t* const optStatePtr)
{
    U32 startCost;
    U32 remainingLength;
    const BYTE* startPosition;

    if (anchor == cachedLitPrice->anchor) {
        startCost = cachedLitPrice->rawLitCost;
        startPosition = anchor + cachedLitPrice->litlen;
        assert(litlen >= cachedLitPrice->litlen);
        remainingLength = litlen - cachedLitPrice->litlen;
    } else {
        startCost = 0;
        startPosition = anchor;
        remainingLength = litlen;
    }

    {   U32 const rawLitCost = startCost + ZSTD_rawLiteralsCost(startPosition, remainingLength, optStatePtr);
        cachedLitPrice->anchor = anchor;
        cachedLitPrice->litlen = litlen;
        cachedLitPrice->rawLitCost = rawLitCost;
        return rawLitCost;
    }
}

static U32 ZSTD_fullLiteralsCost_cached(
                            cachedLiteralPrice_t* const cachedLitPrice,
                            const BYTE* const anchor, U32 const litlen,
                            const optState_t* const optStatePtr)
{
    return ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr)
         + ZSTD_litLengthPrice(litlen, optStatePtr);
}

static int ZSTD_literalsContribution_cached(
                            cachedLiteralPrice_t* const cachedLitPrice,
                            const BYTE* const anchor, U32 const litlen,
                            const optState_t* const optStatePtr)
{
    int const contribution = ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr)
                           + ZSTD_litLengthContribution(litlen, optStatePtr);
    return contribution;
}

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, seqStore_t* seqStore,
                                      U32 rep[ZSTD_REP_NUM],
                                      ZSTD_compressionParameters const* cParams,
                                      const void* src, size_t srcSize,
                                      const int optLevel, const int extDict)
{
    optState_t* const optStatePtr = &ms->opt;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const BYTE* const prefixStart = base + ms->window.dictLimit;

    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
    U32 const minMatch = (cParams->searchLength == 3) ? 3 : 4;

    ZSTD_optimal_t* const opt = optStatePtr->priceTable;
    ZSTD_match_t* const matches = optStatePtr->matchTable;
    cachedLiteralPrice_t cachedLitPrice;

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic");
    ms->nextToUpdate3 = ms->nextToUpdate;
    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
    ip += (ip==prefixStart);
    memset(&cachedLitPrice, 0, sizeof(cachedLitPrice));

    /* Match Loop */
    while (ip < ilimit) {
        U32 cur, last_pos = 0;
        U32 best_mlen, best_off;

        /* find first match */
        {   U32 const litlen = (U32)(ip - anchor);
            U32 const ll0 = !litlen;
            U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, ip, iend, extDict, rep, ll0, matches, minMatch);
            if (!nbMatches) { ip++; continue; }

            /* initialize opt[0] */
            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
            opt[0].mlen = 1;
            opt[0].litlen = litlen;

            /* large match -> immediate encoding */
            {   U32 const maxML = matches[nbMatches-1].len;
                DEBUGLOG(7, "found %u matches of maxLength=%u and offset=%u at cPos=%u => start new serie",
                            nbMatches, maxML, matches[nbMatches-1].off, (U32)(ip-prefixStart));

                if (maxML > sufficient_len) {
                    best_mlen = maxML;
                    best_off = matches[nbMatches-1].off;
                    DEBUGLOG(7, "large match (%u>%u), immediate encoding",
                                best_mlen, sufficient_len);
                    cur = 0;
                    last_pos = 1;
                    goto _shortestPath;
            }   }

            /* set prices for first matches starting position == 0 */
            {   U32 const literalsPrice = ZSTD_fullLiteralsCost_cached(&cachedLitPrice, anchor, litlen, optStatePtr);
                U32 pos;
                U32 matchNb;
                for (pos = 0; pos < minMatch; pos++) {
                    opt[pos].mlen = 1;
                    opt[pos].price = ZSTD_MAX_PRICE;
                }
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offset = matches[matchNb].off;
                    U32 const end = matches[matchNb].len;
                    repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);
                    for ( ; pos <= end ; pos++ ) {
                        U32 const matchPrice = literalsPrice + ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
                        DEBUGLOG(7, "rPos:%u => set initial price : %u",
                                    pos, matchPrice);
                        opt[pos].mlen = pos;
                        opt[pos].off = offset;
                        opt[pos].litlen = litlen;
                        opt[pos].price = matchPrice;
                        memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
                }   }
                last_pos = pos-1;
            }
        }

        /* check further positions */
        for (cur = 1; cur <= last_pos; cur++) {
            const BYTE* const inr = ip + cur;
            assert(cur < ZSTD_OPT_NUM);

            /* Fix current position with one literal if cheaper */
            {   U32 const litlen = (opt[cur-1].mlen == 1) ? opt[cur-1].litlen + 1 : 1;
                int price;  /* note : contribution can be negative */
                if (cur > litlen) {
                    price = opt[cur - litlen].price + ZSTD_literalsContribution(inr-litlen, litlen, optStatePtr);
                } else {
                    price = ZSTD_literalsContribution_cached(&cachedLitPrice, anchor, litlen, optStatePtr);
                }
                assert(price < 1000000000);  /* overflow check */
                if (price <= opt[cur].price) {
                    DEBUGLOG(7, "rPos:%u : better price (%u<%u) using literal",
                                cur, price, opt[cur].price);
                    opt[cur].mlen = 1;
                    opt[cur].off = 0;
                    opt[cur].litlen = litlen;
                    opt[cur].price = price;
                    memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));
            }   }

            /* last match must start at a minimum distance of 8 from oend */
            if (inr > ilimit) continue;

            if (cur == last_pos) break;

            if ( (optLevel==0) /*static*/
              && (opt[cur+1].price <= opt[cur].price) )
                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */

            {   U32 const ll0 = (opt[cur].mlen != 1);
                U32 const litlen = (opt[cur].mlen == 1) ? opt[cur].litlen : 0;
                U32 const previousPrice = (cur > litlen) ? opt[cur-litlen].price : 0;
                U32 const basePrice = previousPrice + ZSTD_fullLiteralsCost(inr-litlen, litlen, optStatePtr);
                U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, inr, iend, extDict, opt[cur].rep, ll0, matches, minMatch);
                U32 matchNb;
                if (!nbMatches) continue;

                {   U32 const maxML = matches[nbMatches-1].len;
                    DEBUGLOG(7, "rPos:%u, found %u matches, of maxLength=%u",
                                cur, nbMatches, maxML);

                    if ( (maxML > sufficient_len)
                       | (cur + maxML >= ZSTD_OPT_NUM) ) {
                        best_mlen = maxML;
                        best_off = matches[nbMatches-1].off;
                        last_pos = cur + 1;
                        goto _shortestPath;
                    }
                }

                /* set prices using matches found at position == cur */
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offset = matches[matchNb].off;
                    repcodes_t const repHistory = ZSTD_updateRep(opt[cur].rep, offset, ll0);
                    U32 const lastML = matches[matchNb].len;
                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
                    U32 mlen;

                    DEBUGLOG(7, "testing match %u => offCode=%u, mlen=%u, llen=%u",
                                matchNb, matches[matchNb].off, lastML, litlen);

                    for (mlen = lastML; mlen >= startML; mlen--) {
                        U32 const pos = cur + mlen;
                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);

                        if ((pos > last_pos) || (price < opt[pos].price)) {
                            DEBUGLOG(7, "rPos:%u => new better price (%u<%u)",
                                        pos, price, opt[pos].price);
                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }
                            opt[pos].mlen = mlen;
                            opt[pos].off = offset;
                            opt[pos].litlen = litlen;
                            opt[pos].price = price;
                            memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
                        } else {
                            if (optLevel==0) break;  /* gets ~+10% speed for about -0.01 ratio loss */
                        }
            }   }   }
        }  /* for (cur = 1; cur <= last_pos; cur++) */

        best_mlen = opt[last_pos].mlen;
        best_off = opt[last_pos].off;
        cur = last_pos - best_mlen;

_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
        assert(opt[0].mlen == 1);

        /* reverse traversal */
        DEBUGLOG(7, "start reverse traversal (last_pos:%u, cur:%u)",
                    last_pos, cur);
        {   U32 selectedMatchLength = best_mlen;
            U32 selectedOffset = best_off;
            U32 pos = cur;
            while (1) {
                U32 const mlen = opt[pos].mlen;
                U32 const off = opt[pos].off;
                opt[pos].mlen = selectedMatchLength;
                opt[pos].off = selectedOffset;
                selectedMatchLength = mlen;
                selectedOffset = off;
                if (mlen > pos) break;
                pos -= mlen;
        }   }

        /* save sequences */
        {   U32 pos;
            for (pos=0; pos < last_pos; ) {
                U32 const llen = (U32)(ip - anchor);
                U32 const mlen = opt[pos].mlen;
                U32 const offset = opt[pos].off;
                if (mlen == 1) { ip++; pos++; continue; }  /* literal position => move on */
                pos += mlen; ip += mlen;

                /* repcodes update : like ZSTD_updateRep(), but update in place */
                if (offset >= ZSTD_REP_NUM) {  /* full offset */
                    rep[2] = rep[1];
                    rep[1] = rep[0];
                    rep[0] = offset - ZSTD_REP_MOVE;
                } else {   /* repcode */
                    U32 const repCode = offset + (llen==0);
                    if (repCode) {  /* note : if repCode==0, no change */
                        U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
                        if (repCode >= 2) rep[2] = rep[1];
                        rep[1] = rep[0];
                        rep[0] = currentOffset;
                    }
                }

                ZSTD_updateStats(optStatePtr, llen, anchor, offset, mlen);
                ZSTD_storeSeq(seqStore, llen, anchor, offset, mlen-MINMATCH);
                anchor = ip;
        }   }
        ZSTD_setLog2Prices(optStatePtr);
    }   /* while (ip < ilimit) */

    /* Return the last literals size */
    return iend - anchor;
}
||||
|
||||
|
||||
size_t ZSTD_compressBlock_btopt(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
DEBUGLOG(5, "ZSTD_compressBlock_btopt");
|
||||
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 0 /*extDict*/);
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_btultra(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 0 /*extDict*/);
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_btopt_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 1 /*extDict*/);
|
||||
}
|
||||
|
||||
size_t ZSTD_compressBlock_btultra_extDict(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
|
||||
{
|
||||
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 1 /*extDict*/);
|
||||
}
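
These four entry points are never called directly from the Go side of this wrapper: zstd picks a block compressor from its internal strategy table based on the configured compression level, and (as an assumption about zstd 1.3.4's default level table, which is an internal detail that may change) the highest levels route blocks through the btopt/btultra optimal parsers above. A minimal, hypothetical sketch of reaching them through this package's public API:

package main

import (
	"bytes"
	"fmt"

	"github.com/DataDog/zstd"
)

func main() {
	var out bytes.Buffer
	// A high level such as zstd.BestCompression is what ends up selecting
	// the bt* optimal-parser strategies inside the C library.
	w := zstd.NewWriterLevel(&out, zstd.BestCompression)
	if _, err := w.Write(bytes.Repeat([]byte("optimal parsing "), 1<<12)); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println("compressed size:", out.Len())
}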
42
third/github.com/DataDog/zstd/zstd_opt.h
Normal file
@ -0,0 +1,42 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_OPT_H
#define ZSTD_OPT_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "zstd_compress_internal.h"

void ZSTD_updateTree(
        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
        const BYTE* ip, const BYTE* iend);  /* used in ZSTD_loadDictionaryContent() */

size_t ZSTD_compressBlock_btopt(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);

size_t ZSTD_compressBlock_btopt_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_OPT_H */
291
third/github.com/DataDog/zstd/zstd_stream.go
Normal file
@ -0,0 +1,291 @@
package zstd

/*
#define ZSTD_STATIC_LINKING_ONLY
#define ZBUFF_DISABLE_DEPRECATE_WARNINGS
#include "zstd.h"
#include "zbuff.h"
*/
import "C"
import (
	"errors"
	"fmt"
	"io"
	"unsafe"
)

var errShortRead = errors.New("short read")

// Writer is an io.WriteCloser that zstd-compresses its input.
type Writer struct {
	CompressionLevel int

	ctx              *C.ZSTD_CCtx
	dict             []byte
	dstBuffer        []byte
	firstError       error
	underlyingWriter io.Writer
}

func resize(in []byte, newSize int) []byte {
	if in == nil {
		return make([]byte, newSize)
	}
	if newSize <= cap(in) {
		return in[:newSize]
	}
	toAdd := newSize - len(in)
	return append(in, make([]byte, toAdd)...)
}

// NewWriter creates a new Writer with default compression options. Writes to
// the writer will be written in compressed form to w.
func NewWriter(w io.Writer) *Writer {
	return NewWriterLevelDict(w, DefaultCompression, nil)
}

// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming default compression.
//
// The level can be DefaultCompression or any integer value between BestSpeed
// and BestCompression inclusive.
func NewWriterLevel(w io.Writer, level int) *Writer {
	return NewWriterLevelDict(w, level, nil)
}

// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to
// compress with. If the dictionary is empty or nil it is ignored. The dictionary
// should not be modified until the writer is closed.
func NewWriterLevelDict(w io.Writer, level int, dict []byte) *Writer {
	var err error
	ctx := C.ZSTD_createCCtx()

	if dict == nil {
		err = getError(int(C.ZSTD_compressBegin(ctx,
			C.int(level))))
	} else {
		err = getError(int(C.ZSTD_compressBegin_usingDict(
			ctx,
			unsafe.Pointer(&dict[0]),
			C.size_t(len(dict)),
			C.int(level))))
	}

	return &Writer{
		CompressionLevel: level,
		ctx:              ctx,
		dict:             dict,
		dstBuffer:        make([]byte, CompressBound(1024)),
		firstError:       err,
		underlyingWriter: w,
	}
}

// Write writes a compressed form of p to the underlying io.Writer.
func (w *Writer) Write(p []byte) (int, error) {
	if w.firstError != nil {
		return 0, w.firstError
	}
	if len(p) == 0 {
		return 0, nil
	}
	// Check that dstBuffer is large enough
	if len(w.dstBuffer) < CompressBound(len(p)) {
		w.dstBuffer = make([]byte, CompressBound(len(p)))
	}

	retCode := C.ZSTD_compressContinue(
		w.ctx,
		unsafe.Pointer(&w.dstBuffer[0]),
		C.size_t(len(w.dstBuffer)),
		unsafe.Pointer(&p[0]),
		C.size_t(len(p)))

	if err := getError(int(retCode)); err != nil {
		return 0, err
	}
	written := int(retCode)

	// Write to underlying buffer
	_, err := w.underlyingWriter.Write(w.dstBuffer[:written])

	// Same behaviour as zlib: we cannot know how much data was actually
	// written, only whether there was an error
	if err != nil {
		return 0, err
	}
	return len(p), err
}

// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer and freeing objects, but does not close the underlying io.Writer.
func (w *Writer) Close() error {
	retCode := C.ZSTD_compressEnd(
		w.ctx,
		unsafe.Pointer(&w.dstBuffer[0]),
		C.size_t(len(w.dstBuffer)),
		unsafe.Pointer(nil),
		C.size_t(0))

	if err := getError(int(retCode)); err != nil {
		return err
	}
	written := int(retCode)
	retCode = C.ZSTD_freeCCtx(w.ctx) // Safely close buffer before writing the end

	if err := getError(int(retCode)); err != nil {
		return err
	}

	_, err := w.underlyingWriter.Write(w.dstBuffer[:written])
	if err != nil {
		return err
	}
	return nil
}

// reader is an io.ReadCloser that decompresses when read from.
type reader struct {
	ctx                 *C.ZBUFF_DCtx
	compressionBuffer   []byte
	compressionLeft     int
	decompressionBuffer []byte
	decompOff           int
	decompSize          int
	dict                []byte
	firstError          error
	recommendedSrcSize  int
	underlyingReader    io.Reader
}

// NewReader creates a new io.ReadCloser. Reads from the returned ReadCloser
// read and decompress data from r. It is the caller's responsibility to call
// Close on the ReadCloser when done. If this is not done, underlying objects
// in the zstd library will not be freed.
func NewReader(r io.Reader) io.ReadCloser {
	return NewReaderDict(r, nil)
}

// NewReaderDict is like NewReader but uses a preset dictionary. NewReaderDict
// ignores the dictionary if it is nil.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
	var err error
	ctx := C.ZBUFF_createDCtx()
	if len(dict) == 0 {
		err = getError(int(C.ZBUFF_decompressInit(ctx)))
	} else {
		err = getError(int(C.ZBUFF_decompressInitDictionary(
			ctx,
			unsafe.Pointer(&dict[0]),
			C.size_t(len(dict)))))
	}
	cSize := int(C.ZBUFF_recommendedDInSize())
	dSize := int(C.ZBUFF_recommendedDOutSize())
	if cSize <= 0 {
		panic(fmt.Errorf("ZBUFF_recommendedDInSize() returned invalid size: %v", cSize))
	}
	if dSize <= 0 {
		panic(fmt.Errorf("ZBUFF_recommendedDOutSize() returned invalid size: %v", dSize))
	}

	compressionBuffer := make([]byte, cSize)
	decompressionBuffer := make([]byte, dSize)
	return &reader{
		ctx:                 ctx,
		dict:                dict,
		compressionBuffer:   compressionBuffer,
		decompressionBuffer: decompressionBuffer,
		firstError:          err,
		recommendedSrcSize:  cSize,
		underlyingReader:    r,
	}
}

// Close frees the allocated C objects
func (r *reader) Close() error {
	return getError(int(C.ZBUFF_freeDCtx(r.ctx)))
}

func (r *reader) Read(p []byte) (int, error) {

	// If we already have enough bytes, return
	if r.decompSize-r.decompOff >= len(p) {
		copy(p, r.decompressionBuffer[r.decompOff:])
		r.decompOff += len(p)
		return len(p), nil
	}

	copy(p, r.decompressionBuffer[r.decompOff:r.decompSize])
	got := r.decompSize - r.decompOff
	r.decompSize = 0
	r.decompOff = 0

	for got < len(p) {
		// Populate src
		src := r.compressionBuffer
		reader := r.underlyingReader
		n, err := TryReadFull(reader, src[r.compressionLeft:])
		if err != nil && err != errShortRead { // Handle underlying reader errors first
			return 0, fmt.Errorf("failed to read from underlying reader: %s", err)
		} else if n == 0 && r.compressionLeft == 0 {
			return got, io.EOF
		}
		src = src[:r.compressionLeft+n]

		// C code
		cSrcSize := C.size_t(len(src))
		cDstSize := C.size_t(len(r.decompressionBuffer))
		retCode := int(C.ZBUFF_decompressContinue(
			r.ctx,
			unsafe.Pointer(&r.decompressionBuffer[0]),
			&cDstSize,
			unsafe.Pointer(&src[0]),
			&cSrcSize))

		if err = getError(retCode); err != nil {
			return 0, fmt.Errorf("failed to decompress: %s", err)
		}

		// Keep any unconsumed input for the next call
		if int(cSrcSize) < len(src) {
			left := src[int(cSrcSize):]
			copy(r.compressionBuffer, left)
		}
		r.compressionLeft = len(src) - int(cSrcSize)
		r.decompSize = int(cDstSize)
		r.decompOff = copy(p[got:], r.decompressionBuffer[:r.decompSize])
		got += r.decompOff

		// Resize buffers
		nsize := retCode // Hint for next src buffer size
		if nsize <= 0 {
			// Reset to recommended size
			nsize = r.recommendedSrcSize
		}
		if nsize < r.compressionLeft {
			nsize = r.compressionLeft
		}
		r.compressionBuffer = resize(r.compressionBuffer, nsize)
	}
	return got, nil
}

// TryReadFull reads into buf just as io.ReadFull does, but expects that the
// stream may end early: instead of returning ErrUnexpectedEOF as ReadAtLeast
// does, it returns errShortRead so that short reads can be distinguished from
// real failures. We cannot use ReadFull/ReadAtLeast directly because they mask
// underlying Reader errors, such as network failures, turning them into a
// panic instead of an error.
func TryReadFull(r io.Reader, buf []byte) (n int, err error) {
	for n < len(buf) && err == nil {
		var nn int
		nn, err = r.Read(buf[n:])
		n += nn
	}
	if n == len(buf) && err == io.EOF {
		err = nil // EOF at the end is somewhat expected
	} else if err == io.EOF {
		err = errShortRead
	}
	return
}
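
Taken together, Writer and reader give a zlib-style streaming round trip. A minimal, self-contained usage sketch (the import path is assumed from this vendor tree):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/DataDog/zstd"
)

func main() {
	var compressed bytes.Buffer

	// Compress a stream.
	w := zstd.NewWriter(&compressed)
	if _, err := w.Write([]byte("hello zstd stream")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flushes the frame epilogue
		panic(err)
	}

	// Decompress it again; Close frees the C-side context.
	r := zstd.NewReader(&compressed)
	defer r.Close()
	plain, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plain)
}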
210
third/github.com/DataDog/zstd/zstd_stream_test.go
Normal file
@ -0,0 +1,210 @@
package zstd

import (
	"bytes"
	"io"
	"io/ioutil"
	"runtime/debug"
	"testing"
)

func failOnError(t *testing.T, msg string, err error) {
	if err != nil {
		debug.PrintStack()
		t.Fatalf("%s: %s", msg, err)
	}
}

func testCompressionDecompression(t *testing.T, dict []byte, payload []byte) {
	var w bytes.Buffer
	writer := NewWriterLevelDict(&w, DefaultCompression, dict)
	_, err := writer.Write(payload)
	failOnError(t, "Failed writing to compress object", err)
	failOnError(t, "Failed to close compress object", writer.Close())
	out := w.Bytes()
	t.Logf("Compressed %v -> %v bytes", len(payload), len(out))
	failOnError(t, "Failed compressing", err)
	rr := bytes.NewReader(out)
	// Check that we can decompress with Decompress()
	decompressed, err := Decompress(nil, out)
	failOnError(t, "Failed to decompress with Decompress()", err)
	if string(payload) != string(decompressed) {
		t.Fatalf("Payload did not match, lengths: %v & %v", len(payload), len(decompressed))
	}

	// Decompress
	r := NewReaderDict(rr, dict)
	dst := make([]byte, len(payload))
	n, err := r.Read(dst)
	if err != nil {
		failOnError(t, "Failed to read for decompression", err)
	}
	dst = dst[:n]
	if string(payload) != string(dst) { // Only print if we can print
		if len(payload) < 100 && len(dst) < 100 {
			t.Fatalf("Cannot compress and decompress: %s != %s", payload, dst)
		} else {
			t.Fatalf("Cannot compress and decompress (lengths: %v bytes & %v bytes)", len(payload), len(dst))
		}
	}
	// Check EOF
	n, err = r.Read(dst)
	if err != io.EOF && len(dst) > 0 { // If we want 0 bytes, that should work
		t.Fatalf("Error should have been EOF, was %s instead: (%v bytes read: %s)", err, n, dst[:n])
	}
	failOnError(t, "Failed to close decompress object", r.Close())
}

func TestResize(t *testing.T) {
	if len(resize(nil, 129)) != 129 {
		t.Fatalf("Cannot allocate new slice")
	}
	a := make([]byte, 1, 200)
	b := resize(a, 129)
	if &a[0] != &b[0] {
		t.Fatalf("Address changed")
	}
	if len(b) != 129 {
		t.Fatalf("Wrong size")
	}
	c := make([]byte, 5, 10)
	d := resize(c, 129)
	if len(d) != 129 {
		t.Fatalf("Cannot allocate a new slice")
	}
}

func TestStreamSimpleCompressionDecompression(t *testing.T) {
	testCompressionDecompression(t, nil, []byte("Hello world!"))
}

func TestStreamEmptySlice(t *testing.T) {
	testCompressionDecompression(t, nil, []byte{})
}

func TestZstdReaderLong(t *testing.T) {
	var long bytes.Buffer
	for i := 0; i < 10000; i++ {
		long.Write([]byte("Hello World!"))
	}
	testCompressionDecompression(t, nil, long.Bytes())
}

func TestStreamCompressionDecompression(t *testing.T) {
	payload := []byte("Hello World!")
	repeat := 10000
	var intermediate bytes.Buffer
	w := NewWriterLevel(&intermediate, 4)
	for i := 0; i < repeat; i++ {
		_, err := w.Write(payload)
		failOnError(t, "Failed writing to compress object", err)
	}
	w.Close()
	// Decompress
	r := NewReader(&intermediate)
	dst := make([]byte, len(payload))
	for i := 0; i < repeat; i++ {
		n, err := r.Read(dst)
		failOnError(t, "Failed to decompress", err)
		if n != len(payload) {
			t.Fatalf("Did not read enough bytes: %v != %v", n, len(payload))
		}
		if string(dst) != string(payload) {
			t.Fatalf("Did not read the same %s != %s", string(dst), string(payload))
		}
	}
	// Check EOF
	n, err := r.Read(dst)
	if err != io.EOF {
		t.Fatalf("Error should have been EOF, was %s instead: (%v bytes read: %s)", err, n, dst[:n])
	}
	failOnError(t, "Failed to close decompress object", r.Close())
}

func TestStreamRealPayload(t *testing.T) {
	if raw == nil {
		t.Skip(ErrNoPayloadEnv)
	}
	testCompressionDecompression(t, nil, raw)
}

func TestStreamEmptyPayload(t *testing.T) {
	w := bytes.NewBuffer(nil)
	writer := NewWriter(w)
	_, err := writer.Write(nil)
	failOnError(t, "failed to write empty slice", err)
	err = writer.Close()
	failOnError(t, "failed to close", err)
	compressed := w.Bytes()
	t.Logf("compressed buffer: 0x%x", compressed)
	// Now recheck that if we decompress, we get an empty slice
	r := bytes.NewBuffer(compressed)
	reader := NewReader(r)
	decompressed, err := ioutil.ReadAll(reader)
	failOnError(t, "failed to read", err)
	err = reader.Close()
	failOnError(t, "failed to close", err)
	if string(decompressed) != "" {
		t.Fatalf("Expected empty slice as decompressed, got %v instead", decompressed)
	}
}

func BenchmarkStreamCompression(b *testing.B) {
	if raw == nil {
		b.Fatal(ErrNoPayloadEnv)
	}
	var intermediate bytes.Buffer
	w := NewWriter(&intermediate)
	defer w.Close()
	b.SetBytes(int64(len(raw)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := w.Write(raw)
		if err != nil {
			b.Fatalf("Failed writing to compress object: %s", err)
		}
		// Prevent unbounded buffer growth.
		intermediate.Reset()
	}
}

func BenchmarkStreamDecompression(b *testing.B) {
	if raw == nil {
		b.Fatal(ErrNoPayloadEnv)
	}
	compressed, err := Compress(nil, raw)
	if err != nil {
		b.Fatalf("Failed to compress: %s", err)
	}
	_, err = Decompress(nil, compressed)
	if err != nil {
		b.Fatalf("Problem: %s", err)
	}

	dst := make([]byte, len(raw))
	b.SetBytes(int64(len(raw)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		rr := bytes.NewReader(compressed)
		r := NewReader(rr)
		_, err := r.Read(dst)
		if err != nil {
			b.Fatalf("Failed to decompress: %s", err)
		}
	}
}

type breakingReader struct{}

func (r *breakingReader) Read(p []byte) (int, error) {
	return len(p) - 1, io.ErrUnexpectedEOF
}

func TestUnexpectedEOFHandling(t *testing.T) {
	r := NewReader(&breakingReader{})
	_, err := r.Read(make([]byte, 1024))
	if err == nil {
		t.Error("Underlying error was handled silently")
	}
}
203
third/github.com/DataDog/zstd/zstd_test.go
Normal file
@ -0,0 +1,203 @@
package zstd

import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"testing"
)

var raw []byte
var (
	ErrNoPayloadEnv = errors.New("PAYLOAD env was not set")
)

func init() {
	var err error
	payload := os.Getenv("PAYLOAD")
	if len(payload) > 0 {
		raw, err = ioutil.ReadFile(payload)
		if err != nil {
			fmt.Printf("Error opening payload: %s\n", err)
		}
	}
}

// Test our version of compress bound vs the C implementation
func TestCompressBound(t *testing.T) {
	tests := []int{0, 1, 2, 10, 456, 15468, 1313, 512, 2147483632}
	for _, test := range tests {
		if CompressBound(test) != cCompressBound(test) {
			t.Fatalf("For %v, results are different: %v (actual) != %v (expected)", test,
				CompressBound(test), cCompressBound(test))
		}
	}
}

// Test error code
func TestErrorCode(t *testing.T) {
	tests := make([]int, 211)
	for i := 0; i < len(tests); i++ {
		tests[i] = i - 105
	}
	for _, test := range tests {
		err := getError(test)
		if err == nil && cIsError(test) {
			t.Fatalf("C function returned an error for %v but ours did not", test)
		} else if err != nil && !cIsError(test) {
			t.Fatalf("Our function returned an error for %v but the C one did not", test)
		}
	}
}

// Test compression
func TestCompressDecompress(t *testing.T) {
	input := []byte("Hello World!")
	out, err := Compress(nil, input)
	if err != nil {
		t.Fatalf("Error while compressing: %v", err)
	}
	out2 := make([]byte, 1000)
	out2, err = Compress(out2, input)
	if err != nil {
		t.Fatalf("Error while compressing: %v", err)
	}
	t.Logf("Compressed: %v", out)
	rein, err := Decompress(nil, out)
	if err != nil {
		t.Fatalf("Error while decompressing: %v", err)
	}
	rein2 := make([]byte, 10)
	rein2, err = Decompress(rein2, out2)
	if err != nil {
		t.Fatalf("Error while decompressing: %v", err)
	}

	if string(input) != string(rein) {
		t.Fatalf("Cannot compress and decompress: %s != %s", input, rein)
	}
	if string(input) != string(rein2) {
		t.Fatalf("Cannot compress and decompress: %s != %s", input, rein)
	}
}

func TestEmptySliceCompress(t *testing.T) {
	compressed, err := Compress(nil, []byte{})
	if err != nil {
		t.Fatalf("Error while compressing: %v", err)
	}
	t.Logf("Compressing empty slice gives 0x%x", compressed)
	decompressed, err := Decompress(nil, compressed)
	if err != nil {
		t.Fatalf("Error while decompressing: %v", err)
	}
	if string(decompressed) != "" {
		t.Fatalf("Expected empty slice as decompressed, got %v instead", decompressed)
	}
}

func TestEmptySliceDecompress(t *testing.T) {
	_, err := Decompress(nil, []byte{})
	if err != ErrEmptySlice {
		t.Fatalf("Did not get the correct error: %s", err)
	}
}

func TestDecompressZeroLengthBuf(t *testing.T) {
	input := []byte("Hello World!")
	out, err := Compress(nil, input)
	if err != nil {
		t.Fatalf("Error while compressing: %v", err)
	}

	buf := make([]byte, 0)
	decompressed, err := Decompress(buf, out)
	if err != nil {
		t.Fatalf("Error while decompressing: %v", err)
	}

	if res, exp := string(input), string(decompressed); res != exp {
		t.Fatalf("expected %s but decompressed to %s", exp, res)
	}
}

func TestTooSmall(t *testing.T) {
	var long bytes.Buffer
	for i := 0; i < 10000; i++ {
		long.Write([]byte("Hello World!"))
	}
	input := long.Bytes()
	out, err := Compress(nil, input)
	if err != nil {
		t.Fatalf("Error while compressing: %v", err)
	}
	rein := make([]byte, 1)
	// This should switch to the decompression stream to handle too small dst
	rein, err = Decompress(rein, out)
	if err != nil {
		t.Fatalf("Failed decompressing: %s", err)
	}
	if string(input) != string(rein) {
		t.Fatalf("Cannot compress and decompress: %s != %s", input, rein)
	}
}

func TestRealPayload(t *testing.T) {
	if raw == nil {
		t.Skip(ErrNoPayloadEnv)
	}
	dst, err := Compress(nil, raw)
	if err != nil {
		t.Fatalf("Failed to compress: %s", err)
	}
	rein, err := Decompress(nil, dst)
	if err != nil {
		t.Fatalf("Failed to decompress: %s", err)
	}
	if string(raw) != string(rein) {
		t.Fatalf("compressed/decompressed payloads are not the same (lengths: %v & %v)", len(raw), len(rein))
	}
}

func BenchmarkCompression(b *testing.B) {
	if raw == nil {
		b.Fatal(ErrNoPayloadEnv)
	}
	dst := make([]byte, CompressBound(len(raw)))
	b.SetBytes(int64(len(raw)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := Compress(dst, raw)
		if err != nil {
			b.Fatalf("Failed compressing: %s", err)
		}
	}
}

func BenchmarkDecompression(b *testing.B) {
	if raw == nil {
		b.Fatal(ErrNoPayloadEnv)
	}
	src := make([]byte, len(raw))
	dst, err := Compress(nil, raw)
	if err != nil {
		b.Fatalf("Failed compressing: %s", err)
	}
	b.Logf("Reduced from %v to %v", len(raw), len(dst))
	b.SetBytes(int64(len(raw)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		src2, err := Decompress(src, dst)
		if err != nil {
			b.Fatalf("Failed decompressing: %s", err)
		}
		b.StopTimer()
		if !bytes.Equal(raw, src2) {
			b.Fatalf("Results are not the same: %v != %v", len(raw), len(src2))
		}
		b.StartTimer()
	}
}
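
One property the tests above rely on is worth calling out: Compress and Decompress accept an optional destination buffer and reuse it when it is large enough. A small sketch of allocation-free reuse, sizing dst with CompressBound the way BenchmarkCompression does (import path assumed from this vendor tree):

package main

import (
	"fmt"

	"github.com/DataDog/zstd"
)

func main() {
	src := []byte("payload that will be compressed over and over")

	// CompressBound(n) is the worst-case compressed size for n input bytes,
	// so this dst can be reused across calls without reallocation.
	dst := make([]byte, zstd.CompressBound(len(src)))

	for i := 0; i < 3; i++ {
		out, err := zstd.Compress(dst, src)
		if err != nil {
			panic(err)
		}
		back, err := zstd.Decompress(nil, out)
		if err != nil {
			panic(err)
		}
		fmt.Println(len(out), len(back))
	}
}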
2127
third/github.com/DataDog/zstd/zstd_v01.c
Normal file
File diff suppressed because it is too large
89
third/github.com/DataDog/zstd/zstd_v01.h
Normal file
@ -0,0 +1,89 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V01_H_28739879432
#define ZSTD_V01_H_28739879432

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal to or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv01_isError())
*/
size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv01_findFrameCompressedSize() : get the source length of a ZSTD frame compliant with v0.1.x format
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv01_isError())
*/
size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error
*/
unsigned ZSTDv01_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx;
ZSTDv01_Dctx* ZSTDv01_createDCtx(void);
size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx);

size_t ZSTDv01_decompressDCtx(void* ctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);

/* *************************************
*  Streaming functions
***************************************/
size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx);

size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx);
size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the above functions alternately.
  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block.
  Result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
*/

/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv01_magicNumber   0xFD2FB51E   /* Big Endian version */
#define ZSTDv01_magicNumberLE 0x1EB52FFD   /* Little Endian version */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V01_H_28739879432 */
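
The alternating-call protocol documented above is shared by all the legacy decoders in this tree. A hedged cgo sketch of the loop (a hypothetical helper, not part of this package; it assumes, as in the modern equivalent of this API, that a hint of 0 means the frame is fully decoded, that dst is pre-sized for the whole regenerated content, and that len(dst) > 0):

package zstd

/*
#include "zstd_v01.h"
*/
import "C"
import (
	"errors"
	"io"
	"unsafe"
)

// decompressV01Stream is a hypothetical illustration of the protocol:
// ZSTDv01_nextSrcSizeToDecompress() says how many bytes to feed next, and
// ZSTDv01_decompressContinue() must receive exactly that many.
func decompressV01Stream(r io.Reader, dst []byte) (int, error) {
	dctx := C.ZSTDv01_createDCtx()
	defer C.ZSTDv01_freeDCtx(dctx)

	var written int
	var src []byte
	for {
		toRead := int(C.ZSTDv01_nextSrcSizeToDecompress(dctx))
		if toRead == 0 {
			return written, nil // frame fully decoded
		}
		if cap(src) < toRead {
			src = make([]byte, toRead)
		}
		if _, err := io.ReadFull(r, src[:toRead]); err != nil {
			return written, err
		}
		if written >= len(dst) {
			return written, errors.New("dst too small")
		}
		ret := C.ZSTDv01_decompressContinue(dctx,
			unsafe.Pointer(&dst[written]), C.size_t(len(dst)-written),
			unsafe.Pointer(&src[0]), C.size_t(toRead))
		if C.ZSTDv01_isError(ret) != 0 {
			return written, errors.New("ZSTDv01 decompression error")
		}
		written += int(ret) // may be 0 when only a header was decoded
	}
}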
3483
third/github.com/DataDog/zstd/zstd_v02.c
Normal file
File diff suppressed because it is too large
88
third/github.com/DataDog/zstd/zstd_v02.h
Normal file
@ -0,0 +1,88 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V02_H_4174539423
#define ZSTD_V02_H_4174539423

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv02_decompress() : decompress ZSTD frames compliant with v0.2.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal to or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv02_isError())
*/
size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv02_findFrameCompressedSize() : get the source length of a ZSTD frame compliant with v0.2.x format
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv02_isError())
*/
size_t ZSTDv02_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error
*/
unsigned ZSTDv02_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv02_Dctx_s ZSTDv02_Dctx;
ZSTDv02_Dctx* ZSTDv02_createDCtx(void);
size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx);

size_t ZSTDv02_decompressDCtx(void* ctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);

/* *************************************
*  Streaming functions
***************************************/
size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx);

size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx);
size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the above functions alternately.
  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block.
  Result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
*/

/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv02_magicNumber 0xFD2FB522   /* v0.2 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V02_H_4174539423 */
3124
third/github.com/DataDog/zstd/zstd_v03.c
Normal file
File diff suppressed because it is too large
88
third/github.com/DataDog/zstd/zstd_v03.h
Normal file
@ -0,0 +1,88 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V03_H_298734209782
#define ZSTD_V03_H_298734209782

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv03_decompress() : decompress ZSTD frames compliant with v0.3.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal to or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv03_isError())
*/
size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv03_findFrameCompressedSize() : get the source length of a ZSTD frame compliant with v0.3.x format
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv03_isError())
*/
size_t ZSTDv03_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error
*/
unsigned ZSTDv03_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv03_Dctx_s ZSTDv03_Dctx;
ZSTDv03_Dctx* ZSTDv03_createDCtx(void);
size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx);

size_t ZSTDv03_decompressDCtx(void* ctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);

/* *************************************
*  Streaming functions
***************************************/
size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx);

size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx);
size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the above functions alternately.
  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block.
  Result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
*/

/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv03_magicNumber 0xFD2FB523   /* v0.3 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V03_H_298734209782 */
3677
third/github.com/DataDog/zstd/zstd_v04.c
Normal file
File diff suppressed because it is too large
137
third/github.com/DataDog/zstd/zstd_v04.h
Normal file
@ -0,0 +1,137 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V04_H_91868324769238
#define ZSTD_V04_H_91868324769238

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv04_decompress() : decompress ZSTD frames compliant with v0.4.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal to or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv04_isError())
*/
size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv04_findFrameCompressedSize() : get the source length of a ZSTD frame compliant with v0.4.x format
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv04_isError())
*/
size_t ZSTDv04_findFrameCompressedSize(const void* src, size_t compressedSize);

/**
ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error
*/
unsigned ZSTDv04_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv04_Dctx_s ZSTDv04_Dctx;
ZSTDv04_Dctx* ZSTDv04_createDCtx(void);
size_t ZSTDv04_freeDCtx(ZSTDv04_Dctx* dctx);

size_t ZSTDv04_decompressDCtx(ZSTDv04_Dctx* dctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);


/* *************************************
*  Direct Streaming
***************************************/
size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx);

size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx);
size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the above functions alternately.
  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block.
  Result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
*/


/* *************************************
*  Buffered Streaming
***************************************/
typedef struct ZBUFFv04_DCtx_s ZBUFFv04_DCtx;
ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void);
size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx);

size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx);
size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* dict, size_t dictSize);

size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr);

/** ************************************************
*  Streaming decompression
*
*  A ZBUFF_DCtx object is required to track streaming operation.
*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
*  Use ZBUFF_decompressInit() to start a new decompression operation.
*  ZBUFF_DCtx objects can be reused multiple times.
*
*  Optionally, a reference to a static dictionary can be set, using ZBUFF_decompressWithDictionary().
*  It must be the same content as the one set during compression phase.
*  Dictionary content must remain accessible during the decompression process.
*
*  Use ZBUFF_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *maxDstSizePtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters or change dst.
*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
*            or 0 when a frame is completely decoded
*            or an error code, which can be tested using ZBUFF_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize / ZBUFF_recommendedDOutSize
*  output : ZBUFF_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when it's decoded.
*  input : ZBUFF_recommendedDInSize==128Kb+3; just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
* **************************************************/
unsigned ZBUFFv04_isError(size_t errorCode);
const char* ZBUFFv04_getErrorName(size_t errorCode);


/** The below functions provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are not compulsory, they just tend to offer better latency */
size_t ZBUFFv04_recommendedDInSize(void);
size_t ZBUFFv04_recommendedDOutSize(void);


/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv04_magicNumber 0xFD2FB524   /* v0.4 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V04_H_91868324769238 */
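
The in/out size convention documented above for ZBUFF is easy to get wrong, so here is a hedged cgo sketch of a single step (a hypothetical helper, not part of this package): on entry the two size arguments hold the buffer capacities, and on return they hold how much was actually written and read. The wrapper's own reader.Read uses the same contract for the current format.

package zstd

/*
#include "zstd_v04.h"
*/
import "C"
import (
	"errors"
	"unsafe"
)

// zbuffV04Step feeds one chunk of compressed input and returns bytes consumed,
// bytes produced, and the size hint for the next input chunk (0 once the frame
// is completely decoded). dst and src are assumed non-empty.
func zbuffV04Step(dctx *C.ZBUFFv04_DCtx, dst, src []byte) (consumed, produced, hint int, err error) {
	dstSize := C.size_t(len(dst))
	srcSize := C.size_t(len(src))
	ret := C.ZBUFFv04_decompressContinue(dctx,
		unsafe.Pointer(&dst[0]), &dstSize,
		unsafe.Pointer(&src[0]), &srcSize)
	if C.ZBUFFv04_isError(ret) != 0 {
		return 0, 0, 0, errors.New(C.GoString(C.ZBUFFv04_getErrorName(ret)))
	}
	// The call may not consume all of src; the caller must re-present
	// src[srcSize:] on the next step.
	return int(srcSize), int(dstSize), int(ret), nil
}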
4011
third/github.com/DataDog/zstd/zstd_v05.c
Normal file
File diff suppressed because it is too large
157
third/github.com/DataDog/zstd/zstd_v05.h
Normal file
@ -0,0 +1,157 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDv05_H
#define ZSTDv05_H

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Dependencies
***************************************/
#include <stddef.h>   /* size_t */
#include "mem.h"      /* U64, U32 */


/* *************************************
*  Simple functions
***************************************/
/*! ZSTDv05_decompress() :
    `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.
    `dstCapacity` must be large enough, equal to or larger than originalSize.
    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
              or an errorCode if it fails (which can be tested using ZSTDv05_isError()) */
size_t ZSTDv05_decompress( void* dst, size_t dstCapacity,
                     const void* src, size_t compressedSize);

/**
ZSTDv05_findFrameCompressedSize() : get the source length of a ZSTD frame
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv05_isError())
*/
size_t ZSTDv05_findFrameCompressedSize(const void* src, size_t compressedSize);

/* *************************************
*  Helper functions
***************************************/
/* Error Management */
unsigned    ZSTDv05_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
const char* ZSTDv05_getErrorName(size_t code);     /*!< provides readable string for an error code */


/* *************************************
*  Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv05_DCtx_s ZSTDv05_DCtx;
ZSTDv05_DCtx* ZSTDv05_createDCtx(void);
size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx);      /*!< @return : errorCode */

/** ZSTDv05_decompressDCtx() :
*   Same as ZSTDv05_decompress(), but requires an already allocated ZSTDv05_DCtx (see ZSTDv05_createDCtx()) */
size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/*-***********************
*  Simple Dictionary API
*************************/
/*! ZSTDv05_decompress_usingDict() :
*   Decompression using a pre-defined Dictionary content (see dictBuilder).
*   Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
*   Note : dict can be NULL, in which case, it's equivalent to ZSTDv05_decompressDCtx() */
size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,
                                    void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize,
                              const void* dict,size_t dictSize);

/*-************************
*  Advanced Streaming API
***************************/
typedef enum { ZSTDv05_fast, ZSTDv05_greedy, ZSTDv05_lazy, ZSTDv05_lazy2, ZSTDv05_btlazy2, ZSTDv05_opt, ZSTDv05_btopt } ZSTDv05_strategy;
typedef struct {
    U64 srcSize;
    U32 windowLog;     /* the only useful information to retrieve */
    U32 contentLog; U32 hashLog; U32 searchLog; U32 searchLength; U32 targetLength; ZSTDv05_strategy strategy;
} ZSTDv05_parameters;
size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize);

size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize);
void   ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx);
size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx);
size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/*-***********************
*  ZBUFF API
*************************/
typedef struct ZBUFFv05_DCtx_s ZBUFFv05_DCtx;
ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void);
size_t         ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* dctx);

size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* dctx);
size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* dctx, const void* dict, size_t dictSize);

size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* dctx,
                                   void* dst, size_t* dstCapacityPtr,
                             const void* src, size_t* srcSizePtr);

/*-***************************************************************************
*  Streaming decompression
*
*  A ZBUFFv05_DCtx object is required to track streaming operations.
*  Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.
*  Use ZBUFFv05_decompressInit() to start a new decompression operation,
*  or ZBUFFv05_decompressInitDictionary() if decompression requires a dictionary.
*  Note that ZBUFFv05_DCtx objects can be reused multiple times.
*
*  Use ZBUFFv05_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change @dst.
*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency)
*            or 0 when a frame is completely decoded
*            or an error code, which can be tested using ZBUFFv05_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv05_recommendedDInSize() / ZBUFFv05_recommendedDOutSize()
*  output : ZBUFFv05_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.
*  input : ZBUFFv05_recommendedDInSize==128Kb+3; just follow indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
* *******************************************************************************/


/* *************************************
*  Tool functions
***************************************/
unsigned ZBUFFv05_isError(size_t errorCode);
const char* ZBUFFv05_getErrorName(size_t errorCode);

/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are just hints, and tend to offer better latency */
size_t ZBUFFv05_recommendedDInSize(void);
size_t ZBUFFv05_recommendedDOutSize(void);



/*-*************************************
*  Constants
***************************************/
#define ZSTDv05_MAGICNUMBER 0xFD2FB525   /* v0.5 */




#if defined (__cplusplus)
}
#endif

#endif  /* ZSTDv05_H */
4124
third/github.com/DataDog/zstd/zstd_v06.c
Normal file
File diff suppressed because it is too large
167
third/github.com/DataDog/zstd/zstd_v06.h
Normal file
@ -0,0 +1,167 @@
|
||||
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDv06_H
#define ZSTDv06_H

#if defined (__cplusplus)
extern "C" {
#endif

/*====== Dependency ======*/
#include <stddef.h>   /* size_t */


/*====== Export for Windows ======*/
/*!
*  ZSTDv06_DLL_EXPORT :
*  Enable exporting of functions when building a Windows DLL
*/
#if defined(_WIN32) && defined(ZSTDv06_DLL_EXPORT) && (ZSTDv06_DLL_EXPORT==1)
#  define ZSTDLIBv06_API __declspec(dllexport)
#else
#  define ZSTDLIBv06_API
#endif


/* *************************************
*  Simple functions
***************************************/
/*! ZSTDv06_decompress() :
    `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.
    `dstCapacity` must be large enough, equal or larger than originalSize.
    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
              or an errorCode if it fails (which can be tested using ZSTDv06_isError()) */
ZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity,
                                    const void* src, size_t compressedSize);

/**
ZSTDv06_findFrameCompressedSize() : get the compressed size of a ZSTD frame
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv06_isError())
*/
size_t ZSTDv06_findFrameCompressedSize(const void* src, size_t compressedSize);

/* *************************************
*  Helper functions
***************************************/
ZSTDLIBv06_API size_t ZSTDv06_compressBound(size_t srcSize); /*!< maximum compressed size (worst case scenario) */

/* Error Management */
ZSTDLIBv06_API unsigned    ZSTDv06_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
ZSTDLIBv06_API const char* ZSTDv06_getErrorName(size_t code);     /*!< provides readable string for an error code */


/* *************************************
*  Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv06_DCtx_s ZSTDv06_DCtx;
ZSTDLIBv06_API ZSTDv06_DCtx* ZSTDv06_createDCtx(void);
ZSTDLIBv06_API size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx);      /*!< @return : errorCode */

/** ZSTDv06_decompressDCtx() :
*   Same as ZSTDv06_decompress(), but requires an already allocated ZSTDv06_DCtx (see ZSTDv06_createDCtx()) */
ZSTDLIBv06_API size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/*-***********************
*  Dictionary API
*************************/
/*! ZSTDv06_decompress_usingDict() :
*   Decompression using a pre-defined Dictionary content (see dictBuilder).
*   Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
*   Note : dict can be NULL, in which case, it's equivalent to ZSTDv06_decompressDCtx() */
ZSTDLIBv06_API size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
                                             void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const void* dict, size_t dictSize);


/*-************************
*  Advanced Streaming API
***************************/
struct ZSTDv06_frameParams_s { unsigned long long frameContentSize; unsigned windowLog; };
typedef struct ZSTDv06_frameParams_s ZSTDv06_frameParams;

ZSTDLIBv06_API size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
ZSTDLIBv06_API size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIBv06_API void   ZSTDv06_copyDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx);

ZSTDLIBv06_API size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx);
ZSTDLIBv06_API size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);



/* *************************************
*  ZBUFF API
***************************************/
typedef struct ZBUFFv06_DCtx_s ZBUFFv06_DCtx;
ZSTDLIBv06_API ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void);
ZSTDLIBv06_API size_t         ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* dctx);

ZSTDLIBv06_API size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* dctx);
ZSTDLIBv06_API size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* dctx, const void* dict, size_t dictSize);

ZSTDLIBv06_API size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* dctx,
                                            void* dst, size_t* dstCapacityPtr,
                                      const void* src, size_t* srcSizePtr);

/*-***************************************************************************
*  Streaming decompression howto
*
*  A ZBUFFv06_DCtx object is required to track streaming operations.
*  Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
*  Use ZBUFFv06_decompressInit() to start a new decompression operation,
*  or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
*  Note that ZBUFFv06_DCtx objects can be re-initialized multiple times.
*
*  Use ZBUFFv06_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
*            or 0 when a frame is completely decoded,
*            or an error code, which can be tested using ZBUFFv06_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
*  output : ZBUFFv06_recommendedDOutSize == 128 KB; the block size is the internal unit, it ensures it's always possible to write a full block when decoded.
*  input  : ZBUFFv06_recommendedDInSize == 128 KB + 3;
*           just follow indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
* *******************************************************************************/


/* *************************************
*  Tool functions
***************************************/
ZSTDLIBv06_API unsigned ZBUFFv06_isError(size_t errorCode);
ZSTDLIBv06_API const char* ZBUFFv06_getErrorName(size_t errorCode);

/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are just hints, they tend to offer better latency */
ZSTDLIBv06_API size_t ZBUFFv06_recommendedDInSize(void);
ZSTDLIBv06_API size_t ZBUFFv06_recommendedDOutSize(void);


/*-*************************************
*  Constants
***************************************/
#define ZSTDv06_MAGICNUMBER 0xFD2FB526   /* v0.6 */


#if defined (__cplusplus)
}
#endif

#endif  /* ZSTDv06_H */
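
The v0.6 frame-params and dictionary entry points combine naturally: peek at the frame header, size the destination, then decompress in one shot. A minimal sketch, assuming the frame actually records its content size (if `frameContentSize` is 0 the size is unknown and a streaming loop like the ZBUFF one above is needed instead); `decompress_v06` and its allocation strategy are illustrative only:

```c
/* Sketch: one-shot v0.6 decompression with an optional dictionary.
 * Assumes "zstd_v06.h" is on the include path. */
#include <stdlib.h>
#include "zstd_v06.h"

void* decompress_v06(const void* src, size_t srcSize,
                     const void* dict, size_t dictSize, size_t* outSize)
{
    ZSTDv06_frameParams fp;
    void* dst = NULL;
    ZSTDv06_DCtx* dctx = ZSTDv06_createDCtx();
    if (dctx == NULL) return NULL;

    /* Peek at the frame header; does not consume input. A non-zero
     * return here is treated as "need more input" or an error. */
    if (ZSTDv06_getFrameParams(&fp, src, srcSize) != 0) goto _fail;
    if (fp.frameContentSize == 0) goto _fail;  /* size unknown: stream instead */

    dst = malloc((size_t)fp.frameContentSize);
    if (dst == NULL) goto _fail;

    {   /* dict may be NULL: then this is equivalent to ZSTDv06_decompressDCtx() */
        size_t const r = ZSTDv06_decompress_usingDict(dctx,
                              dst, (size_t)fp.frameContentSize,
                              src, srcSize, dict, dictSize);
        if (ZSTDv06_isError(r)) goto _fail;
        *outSize = r;
    }
    ZSTDv06_freeDCtx(dctx);
    return dst;
_fail:
    free(dst);
    ZSTDv06_freeDCtx(dctx);
    return NULL;
}
```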
4502
third/github.com/DataDog/zstd/zstd_v07.c
Normal file
File diff suppressed because it is too large
182
third/github.com/DataDog/zstd/zstd_v07.h
Normal file
@@ -0,0 +1,182 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDv07_H_235446
#define ZSTDv07_H_235446

#if defined (__cplusplus)
extern "C" {
#endif

/*====== Dependency ======*/
#include <stddef.h>   /* size_t */


/*====== Export for Windows ======*/
/*!
*  ZSTDv07_DLL_EXPORT :
*  Enable exporting of functions when building a Windows DLL
*/
#if defined(_WIN32) && defined(ZSTDv07_DLL_EXPORT) && (ZSTDv07_DLL_EXPORT==1)
#  define ZSTDLIBv07_API __declspec(dllexport)
#else
#  define ZSTDLIBv07_API
#endif


/* *************************************
*  Simple API
***************************************/
/*! ZSTDv07_getDecompressedSize() :
*   @return : decompressed size if known, 0 otherwise.
       note 1 : if `0`, follow up with ZSTDv07_getFrameParams() to know precise failure cause.
       note 2 : decompressed size could be wrong or intentionally modified!
                always ensure results fit within application's authorized limits */
unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);

/*! ZSTDv07_decompress() :
    `compressedSize` : must be the _exact_ size of the compressed input, otherwise decompression will fail.
    `dstCapacity` must be equal or larger than originalSize.
    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
              or an errorCode if it fails (which can be tested using ZSTDv07_isError()) */
ZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity,
                                    const void* src, size_t compressedSize);

/**
ZSTDv07_findFrameCompressedSize() : get the compressed size of a ZSTD frame
    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    return : the number of bytes that would be read to decompress this frame
             or an errorCode if it fails (which can be tested using ZSTDv07_isError())
*/
size_t ZSTDv07_findFrameCompressedSize(const void* src, size_t compressedSize);

/*====== Helper functions ======*/
ZSTDLIBv07_API unsigned    ZSTDv07_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
ZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code);     /*!< provides readable string from an error code */


/*-*************************************
*  Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv07_DCtx_s ZSTDv07_DCtx;
ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx(void);
ZSTDLIBv07_API size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx);      /*!< @return : errorCode */

/** ZSTDv07_decompressDCtx() :
*   Same as ZSTDv07_decompress(), but requires an already allocated ZSTDv07_DCtx (see ZSTDv07_createDCtx()) */
ZSTDLIBv07_API size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/*-************************
*  Simple dictionary API
***************************/
/*! ZSTDv07_decompress_usingDict() :
*   Decompression using a pre-defined Dictionary content (see dictBuilder).
*   Dictionary must be identical to the one used during compression.
*   Note : This function loads the dictionary, resulting in a significant startup delay */
ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
                                             void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const void* dict, size_t dictSize);


/*-**************************
*  Advanced Dictionary API
****************************/
/*! ZSTDv07_createDDict() :
*   Create a digested dictionary, ready to start decompression operation without startup delay.
*   `dict` can be released after creation */
typedef struct ZSTDv07_DDict_s ZSTDv07_DDict;
ZSTDLIBv07_API ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize);
ZSTDLIBv07_API size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict);

/*! ZSTDv07_decompress_usingDDict() :
*   Decompression using a pre-digested Dictionary.
*   Faster startup than ZSTDv07_decompress_usingDict(), recommended when the same dictionary is used multiple times. */
ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
                                              void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize,
                                        const ZSTDv07_DDict* ddict);

typedef struct {
    unsigned long long frameContentSize;
    unsigned windowSize;
    unsigned dictID;
    unsigned checksumFlag;
} ZSTDv07_frameParams;

ZSTDLIBv07_API size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */




/* *************************************
*  Streaming functions
***************************************/
typedef struct ZBUFFv07_DCtx_s ZBUFFv07_DCtx;
ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void);
ZSTDLIBv07_API size_t         ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* dctx);

ZSTDLIBv07_API size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* dctx);
ZSTDLIBv07_API size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* dctx, const void* dict, size_t dictSize);

ZSTDLIBv07_API size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* dctx,
                                            void* dst, size_t* dstCapacityPtr,
                                      const void* src, size_t* srcSizePtr);

/*-***************************************************************************
*  Streaming decompression howto
*
*  A ZBUFFv07_DCtx object is required to track streaming operations.
*  Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
*  Use ZBUFFv07_decompressInit() to start a new decompression operation,
*  or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
*  Note that ZBUFFv07_DCtx objects can be re-initialized multiple times.
*
*  Use ZBUFFv07_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
*            or 0 when a frame is completely decoded,
*            or an error code, which can be tested using ZBUFFv07_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
*  output : ZBUFFv07_recommendedDOutSize == 128 KB; the block size is the internal unit, it ensures it's always possible to write a full block when decoded.
*  input  : ZBUFFv07_recommendedDInSize == 128 KB + 3;
*           just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
* *******************************************************************************/


/* *************************************
*  Tool functions
***************************************/
ZSTDLIBv07_API unsigned ZBUFFv07_isError(size_t errorCode);
ZSTDLIBv07_API const char* ZBUFFv07_getErrorName(size_t errorCode);

/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are just hints, they tend to offer better latency */
ZSTDLIBv07_API size_t ZBUFFv07_recommendedDInSize(void);
ZSTDLIBv07_API size_t ZBUFFv07_recommendedDOutSize(void);


/*-*************************************
*  Constants
***************************************/
#define ZSTDv07_MAGICNUMBER 0xFD2FB527   /* v0.7 */


#if defined (__cplusplus)
}
#endif

#endif  /* ZSTDv07_H_235446 */
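
The point of the DDict API above is amortization: ZSTDv07_decompress_usingDict() loads the dictionary on every call, so when many frames share one dictionary it pays to digest it once. A minimal sketch under that assumption; `decompress_batch_v07` and its `frames`/`frameSizes` arrays are hypothetical caller-side inputs:

```c
/* Sketch: decompress several v0.7 frames that share one dictionary,
 * digesting the dictionary once up front. Assumes "zstd_v07.h" is on
 * the include path. */
#include <stddef.h>
#include "zstd_v07.h"

int decompress_batch_v07(const void* const* frames, const size_t* frameSizes,
                         size_t nbFrames, const void* dict, size_t dictSize,
                         void* dst, size_t dstCapacity)
{
    ZSTDv07_DCtx*  dctx  = ZSTDv07_createDCtx();
    ZSTDv07_DDict* ddict = ZSTDv07_createDDict(dict, dictSize);  /* digest once */
    size_t i;
    int ret = 0;

    if (dctx == NULL || ddict == NULL) ret = -1;

    for (i = 0; ret == 0 && i < nbFrames; i++) {
        size_t const r = ZSTDv07_decompress_usingDDict(dctx, dst, dstCapacity,
                                                       frames[i], frameSizes[i],
                                                       ddict);
        if (ZSTDv07_isError(r)) ret = -1;
        /* r = decompressed size of frame i; a real caller would consume it */
    }
    if (ddict != NULL) ZSTDv07_freeDDict(ddict);
    if (dctx  != NULL) ZSTDv07_freeDCtx(dctx);
    return ret;
}
```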
1831
third/github.com/DataDog/zstd/zstdmt_compress.c
Executable file
File diff suppressed because it is too large
156
third/github.com/DataDog/zstd/zstdmt_compress.h
Executable file
@@ -0,0 +1,156 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDMT_COMPRESS_H
#define ZSTDMT_COMPRESS_H

#if defined (__cplusplus)
extern "C" {
#endif


/* Note : This is an internal API.
 *        Some methods are still exposed (ZSTDLIB_API),
 *        because they used to be the only way to invoke MT compression.
 *        Now, it's recommended to use ZSTD_compress_generic() instead.
 *        These methods will stop being exposed in a future version */

/* ===   Dependencies   === */
#include <stddef.h>                     /* size_t */
#define ZSTD_STATIC_LINKING_ONLY        /* ZSTD_parameters */
#include "zstd.h"                       /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */


/* ===   Memory management   === */
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
                                                    ZSTD_customMem cMem);
ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);

ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);


/* ===   Simple one-pass compression function   === */

ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                                       void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                       int compressionLevel);


/* ===   Streaming functions   === */

ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */

ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);

ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */


/* ===   Advanced functions and parameters   === */

#ifndef ZSTDMT_JOBSIZE_MIN
#  define ZSTDMT_JOBSIZE_MIN (1U << 20)   /* 1 MB - Minimum size of each compression job */
#endif

ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
                                            void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize,
                                      const ZSTD_CDict* cdict,
                                            ZSTD_parameters params,
                                            unsigned overlapLog);

ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
                                        const void* dict, size_t dictSize,   /* dict can be released after init, a local copy is preserved within zcs */
                                        ZSTD_parameters params,
                                        unsigned long long pledgedSrcSize);  /* pledgedSrcSize is optional and can be zero == unknown */

ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
                                        const ZSTD_CDict* cdict,
                                        ZSTD_frameParameters fparams,
                                        unsigned long long pledgedSrcSize);  /* note : zero means empty */

/* ZSTDMT_parameter :
 * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
typedef enum {
    ZSTDMT_p_jobSize,           /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
    ZSTDMT_p_overlapSectionLog  /* Each job may reload a part of the previous job to enhance compression ratio; 0 == no overlap, 6 (default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
} ZSTDMT_parameter;

/* ZSTDMT_setMTCtxParameter() :
 * allows setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter.
 * The function must typically be called after ZSTD_createCCtx() but __before ZSTDMT_init*()__.
 * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
 * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value);


/*! ZSTDMT_compressStream_generic() :
 *  Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()
 *  depending on flush directive.
 * @return : minimum amount of data still to be flushed
 *           0 if fully flushed
 *           or an error code
 *  note : needs to be initialized using any ZSTD_initCStream*() variant */
ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                                ZSTD_outBuffer* output,
                                                ZSTD_inBuffer* input,
                                                ZSTD_EndDirective endOp);


/* ========================================================
 * ===   Private interface, for use by ZSTD_compress.c  ===
 * ===   Not exposed in libzstd. Never invoke directly  ===
 * ======================================================== */

size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);

/* ZSTDMT_CCtxParam_setNbWorkers()
 * Set nbWorkers, and clamp it.
 * Also reset jobSize and overlapLog */
size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);

/*! ZSTDMT_updateCParams_whileCompressing() :
 *  Updates only a selected set of compression parameters, to remain compatible with the current frame.
 *  New parameters will be applied to the next compression job. */
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);

/* ZSTDMT_getNbWorkers():
 * @return nb of threads currently active in mtctx.
 * mtctx must be valid */
unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx);

/* ZSTDMT_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for the current frame.
 * It is able to count progression inside worker threads. */
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);


/*! ZSTDMT_initCStream_internal() :
 *  Private use only. Init streaming operation.
 *  expects params to be valid.
 *  must receive dict, or cdict, or none, but not both.
 * @return : 0, or an error code */
size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
                    const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
                    const ZSTD_CDict* cdict,
                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);


#if defined (__cplusplus)
}
#endif

#endif   /* ZSTDMT_COMPRESS_H */
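
The exposed streaming entry points above follow the usual zstd push/pull pattern: init, feed input with ZSTDMT_compressStream(), then drain with ZSTDMT_endStream() until it returns 0. A minimal sketch; `compress_mt`, the compression level, and the overlap setting are illustrative choices, it assumes `dstCapacity >= ZSTD_compressBound(srcSize)` so the drain loop terminates, and per the header's own note new code should prefer ZSTD_compress_generic():

```c
/* Sketch: compress one frame with the exposed multi-threaded streaming
 * API. Assumes "zstdmt_compress.h" (and thus zstd.h) is on the include
 * path. Returns the compressed size, or 0 on error. */
#include "zstdmt_compress.h"

size_t compress_mt(const void* src, size_t srcSize,
                   void* dst, size_t dstCapacity, unsigned nbWorkers)
{
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    ZSTDMT_CCtx* mtctx = ZSTDMT_createCCtx(nbWorkers);
    if (mtctx == NULL) return 0;

    /* Per the note above, parameters must be set before ZSTDMT_init*() */
    ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_overlapSectionLog, 6);
    if (ZSTD_isError(ZSTDMT_initCStream(mtctx, 3 /* compression level */)))
        goto _fail;

    while (in.pos < in.size) {                    /* feed all input */
        if (ZSTD_isError(ZSTDMT_compressStream(mtctx, &out, &in))) goto _fail;
    }
    for (;;) {                                    /* drain: 0 == fully flushed */
        size_t const remaining = ZSTDMT_endStream(mtctx, &out);
        if (ZSTD_isError(remaining)) goto _fail;
        if (remaining == 0) break;
        if (out.pos == out.size) goto _fail;      /* dst too small */
    }
    ZSTDMT_freeCCtx(mtctx);
    return out.pos;                               /* compressed size */
_fail:
    ZSTDMT_freeCCtx(mtctx);
    return 0;
}
```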