
Update zstd and lz4 versions

Mike Smith authored on 29/11/2020 12:26:04
Showing 1 changed file
new file mode 100644
@@ -0,0 +1,1538 @@
/*
    LZ4 HC - High Compression Mode of LZ4
    Copyright (C) 2011-2017, Yann Collet.

    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:

    * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    You can contact the author at :
       - LZ4 source repository : https://github.com/lz4/lz4
       - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */


/* *************************************
*  Tuning Parameter
***************************************/

/*! HEAPMODE :
 *  Select how the default compression function allocates its workspace memory:
 *  on the stack (0: fastest) or on the heap (1: requires malloc()).
 *  Since the workspace is rather large, heap mode is recommended.
 */
#ifndef LZ4HC_HEAPMODE
#  define LZ4HC_HEAPMODE 1
#endif


/*===    Dependency    ===*/
#define LZ4_HC_STATIC_LINKING_ONLY
#include "lz4hc.h"


/*===   Common LZ4 definitions   ===*/
#if defined(__GNUC__)
#  pragma GCC diagnostic ignored "-Wunused-function"
#endif
#if defined (__clang__)
#  pragma clang diagnostic ignored "-Wunused-function"
#endif

/*===   Enums   ===*/
typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;


#define LZ4_COMMONDEFS_ONLY
#ifndef LZ4_SRC_INCLUDED
#include "lz4.c"   /* LZ4_count, constants, mem */
#endif

/*===   Constants   ===*/
#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
#define LZ4_OPT_NUM   (1<<12)


/*===   Macros   ===*/
#define MIN(a,b)   ( (a) < (b) ? (a) : (b) )
#define MAX(a,b)   ( (a) > (b) ? (a) : (b) )
#define HASH_FUNCTION(i)         (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
#define DELTANEXTMAXD(p)         chainTable[(p) & LZ4HC_MAXD_MASK]    /* flexible, LZ4HC_MAXD dependent */
#define DELTANEXTU16(table, pos) table[(U16)(pos)]   /* faster */
/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor

static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
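/* note : HASH_FUNCTION is multiplicative (Fibonacci) hashing, per Knuth :
 * the first MINMATCH (4) bytes are multiplied by 2654435761U, a prime close
 * to 2^32 / golden ratio, and the top LZ4HC_HASH_LOG bits of the product are
 * kept as the hash. Example : on a little-endian platform, hashing "abcd"
 * reads the word 0x64636261, and the high bits of
 * (0x64636261 * 2654435761U) select the hash table slot. */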


/**************************************
*  HC Compression
**************************************/
static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
{
    MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
    MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
}

static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
{
    uptrval startingOffset = (uptrval)(hc4->end - hc4->base);
    if (startingOffset > 1 GB) {
        LZ4HC_clearTables(hc4);
        startingOffset = 0;
    }
    startingOffset += 64 KB;
    hc4->nextToUpdate = (U32) startingOffset;
    hc4->base = start - startingOffset;
    hc4->end = start;
    hc4->dictBase = start - startingOffset;
    hc4->dictLimit = (U32) startingOffset;
    hc4->lowLimit = (U32) startingOffset;
}
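/* note : the extra 64 KB starting offset presumably ensures that
 * zero-initialized hash table entries (index 0) always lie more than
 * LZ4_DISTANCE_MAX bytes behind any real position, so stale zero entries
 * are rejected by the window check instead of being read as matches. */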


/* Update chains up to ip (excluded) */
LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
{
    U16* const chainTable = hc4->chainTable;
    U32* const hashTable  = hc4->hashTable;
    const BYTE* const base = hc4->base;
    U32 const target = (U32)(ip - base);
    U32 idx = hc4->nextToUpdate;

    while (idx < target) {
        U32 const h = LZ4HC_hashPtr(base+idx);
        size_t delta = idx - hashTable[h];
        if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX;
        DELTANEXTU16(chainTable, idx) = (U16)delta;
        hashTable[h] = idx;
        idx++;
    }

    hc4->nextToUpdate = target;
}
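/* note : chainTable entries are 16-bit deltas to the previous position with
 * the same hash, capped at LZ4_DISTANCE_MAX. The cap keeps the delta within
 * 16 bits, and a capped step takes the chain walk outside the match window,
 * so traversal terminates at the window edge. */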

/** LZ4HC_countBack() :
 * @return : negative value, nb of common bytes before ip/match */
LZ4_FORCE_INLINE
int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
                    const BYTE* const iMin, const BYTE* const mMin)
{
    int back = 0;
    int const min = (int)MAX(iMin - ip, mMin - match);
    assert(min <= 0);
    assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
    assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));
    while ( (back > min)
         && (ip[back-1] == match[back-1]) )
            back--;
    return back;
}
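/* example : if both ip and its candidate match are preceded by the same
 * 3 bytes "xyz", LZ4HC_countBack() returns -3, letting the caller widen the
 * match backwards by 3 bytes and shorten the preceding literal run
 * accordingly; iMin/mMin bound how far back the scan may go. */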

#if defined(_MSC_VER)
#  define LZ4HC_rotl32(x,r) _rotl(x,r)
#else
#  define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#endif


static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
{
    size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
    if (bitsToRotate == 0)
        return pattern;
    return LZ4HC_rotl32(pattern, (int)bitsToRotate);
}
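/* note : the early return for bitsToRotate==0 is required : the generic
 * LZ4HC_rotl32() fallback would expand to (x >> (32 - 0)), i.e. a shift by
 * 32, which is undefined behaviour in C for a 32-bit operand. */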

/* LZ4HC_countPattern() :
 * pattern32 must be a sample of a repetitive pattern of length 1, 2 or 4 (but not 3!) */
static unsigned
LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
{
    const BYTE* const iStart = ip;
    reg_t const pattern = (sizeof(pattern)==8) ? (reg_t)pattern32 + (((reg_t)pattern32) << 32) : pattern32;

    while (likely(ip < iEnd-(sizeof(pattern)-1))) {
        reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
        if (!diff) { ip+=sizeof(pattern); continue; }
        ip += LZ4_NbCommonBytes(diff);
        return (unsigned)(ip - iStart);
    }

    if (LZ4_isLittleEndian()) {
        reg_t patternByte = pattern;
        while ((ip<iEnd) && (*ip == (BYTE)patternByte)) {
            ip++; patternByte >>= 8;
        }
    } else {  /* big endian */
        U32 bitOffset = (sizeof(pattern)*8) - 8;
        while (ip < iEnd) {
            BYTE const byte = (BYTE)(pattern >> bitOffset);
            if (*ip != byte) break;
            ip ++; bitOffset -= 8;
        }
    }

    return (unsigned)(ip - iStart);
}
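/* note : lengths 1, 2 and 4 all divide sizeof(pattern32), so the 4-byte
 * sample repeats seamlessly when tiled. A period-3 pattern does not :
 * "abcabcabc..." sampled as "abca" is followed by "bcab", not "abca",
 * which is why length 3 is explicitly excluded. */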

/* LZ4HC_reverseCountPattern() :
 * pattern must be a sample of a repetitive pattern of length 1, 2 or 4 (but not 3!)
 * read using natural platform endianness */
static unsigned
LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
{
    const BYTE* const iStart = ip;

    while (likely(ip >= iLow+4)) {
        if (LZ4_read32(ip-4) != pattern) break;
        ip -= 4;
    }
    {   const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */
        while (likely(ip>iLow)) {
            if (ip[-1] != *bytePtr) break;
            ip--; bytePtr--;
    }   }
    return (unsigned)(iStart - ip);
}

/* LZ4HC_protectDictEnd() :
 * Checks whether the match lies in the last 3 bytes of the dictionary, where
 * reading the 4-byte MINMATCH would overrun the dictionary end.
 * @returns true if the match index is okay.
 */
static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
{
    return ((U32)((dictLimit - 1) - matchIndex) >= 3);
}
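/* note : the test relies on unsigned wrap-around : when matchIndex is one of
 * the last 3 dictionary indices (dictLimit-3 .. dictLimit-1), the subtraction
 * yields 0..2 and the test fails; for any earlier index, or any index at or
 * beyond dictLimit (where the subtraction wraps to a huge value), it passes. */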

typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;

LZ4_FORCE_INLINE int
LZ4HC_InsertAndGetWiderMatch (
    LZ4HC_CCtx_internal* hc4,
    const BYTE* const ip,
    const BYTE* const iLowLimit,
    const BYTE* const iHighLimit,
    int longest,
    const BYTE** matchpos,
    const BYTE** startpos,
    const int maxNbAttempts,
    const int patternAnalysis,
    const int chainSwap,
    const dictCtx_directive dict,
    const HCfavor_e favorDecSpeed)
{
    U16* const chainTable = hc4->chainTable;
    U32* const HashTable = hc4->hashTable;
    const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
    const BYTE* const base = hc4->base;
    const U32 dictLimit = hc4->dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const U32 ipIndex = (U32)(ip - base);
    const U32 lowestMatchIndex = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
    const BYTE* const dictBase = hc4->dictBase;
    int const lookBackLength = (int)(ip-iLowLimit);
    int nbAttempts = maxNbAttempts;
    U32 matchChainPos = 0;
    U32 const pattern = LZ4_read32(ip);
    U32 matchIndex;
    repeat_state_e repeat = rep_untested;
    size_t srcPatternLength = 0;

    DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
    /* First Match */
    LZ4HC_Insert(hc4, ip);
    matchIndex = HashTable[LZ4HC_hashPtr(ip)];
    DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
                matchIndex, lowestMatchIndex);

    while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) {
        int matchLength=0;
        nbAttempts--;
        assert(matchIndex < ipIndex);
        if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
            /* do nothing */
        } else if (matchIndex >= dictLimit) {   /* within current Prefix */
            const BYTE* const matchPtr = base + matchIndex;
            assert(matchPtr >= lowPrefixPtr);
            assert(matchPtr < ip);
            assert(longest >= 1);
            if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) {
                if (LZ4_read32(matchPtr) == pattern) {
                    int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr) : 0;
                    matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
                    matchLength -= back;
                    if (matchLength > longest) {
                        longest = matchLength;
                        *matchpos = matchPtr + back;
                        *startpos = ip + back;
            }   }   }
        } else {   /* lowestMatchIndex <= matchIndex < dictLimit */
            const BYTE* const matchPtr = dictBase + matchIndex;
            if (LZ4_read32(matchPtr) == pattern) {
                const BYTE* const dictStart = dictBase + hc4->lowLimit;
                int back = 0;
                const BYTE* vLimit = ip + (dictLimit - matchIndex);
                if (vLimit > iHighLimit) vLimit = iHighLimit;
                matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
                if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
                    matchLength += LZ4_count(ip+matchLength, lowPrefixPtr, iHighLimit);
                back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
                matchLength -= back;
                if (matchLength > longest) {
                    longest = matchLength;
                    *matchpos = base + matchIndex + back;   /* virtual pos, relative to ip, to retrieve offset */
                    *startpos = ip + back;
        }   }   }

        if (chainSwap && matchLength==longest) {    /* better match => select a better chain */
            assert(lookBackLength==0);   /* search forward only */
            if (matchIndex + (U32)longest <= ipIndex) {
                int const kTrigger = 4;
                U32 distanceToNextMatch = 1;
                int const end = longest - MINMATCH + 1;
                int step = 1;
                int accel = 1 << kTrigger;
                int pos;
                for (pos = 0; pos < end; pos += step) {
                    U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos);
                    step = (accel++ >> kTrigger);
                    if (candidateDist > distanceToNextMatch) {
                        distanceToNextMatch = candidateDist;
                        matchChainPos = (U32)pos;
                        accel = 1 << kTrigger;
                    }
                }
                if (distanceToNextMatch > 1) {
                    if (distanceToNextMatch > matchIndex) break;   /* avoid overflow */
                    matchIndex -= distanceToNextMatch;
                    continue;
        }   }   }

        {   U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex);
            if (patternAnalysis && distNextMatch==1 && matchChainPos==0) {
                U32 const matchCandidateIdx = matchIndex-1;
                /* may be a repeated pattern */
                if (repeat == rep_untested) {
                    if ( ((pattern & 0xFFFF) == (pattern >> 16))
                      &  ((pattern & 0xFF)   == (pattern >> 24)) ) {
                        repeat = rep_confirmed;
                        srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
                    } else {
                        repeat = rep_not;
                }   }
                if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
                  && LZ4HC_protectDictEnd(dictLimit, matchCandidateIdx) ) {
                    const int extDict = matchCandidateIdx < dictLimit;
                    const BYTE* const matchPtr = (extDict ? dictBase : base) + matchCandidateIdx;
                    if (LZ4_read32(matchPtr) == pattern) {  /* good candidate */
                        const BYTE* const dictStart = dictBase + hc4->lowLimit;
                        const BYTE* const iLimit = extDict ? dictBase + dictLimit : iHighLimit;
                        size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
                        if (extDict && matchPtr + forwardPatternLength == iLimit) {
                            U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
                            forwardPatternLength += LZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
                        }
                        {   const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
                            size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
                            size_t currentSegmentLength;
                            if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
                                U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
                                backLength += LZ4HC_reverseCountPattern(dictBase + dictLimit, dictStart, rotatedPattern);
                            }
                            /* Limit backLength so it does not go further back than lowestMatchIndex */
                            backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
                            assert(matchCandidateIdx - backLength >= lowestMatchIndex);
                            currentSegmentLength = backLength + forwardPatternLength;
                            /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
                            if ( (currentSegmentLength >= srcPatternLength)   /* current pattern segment large enough to contain full srcPatternLength */
                              && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
                                U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength;  /* best position, full pattern, might be followed by more match */
                                if (LZ4HC_protectDictEnd(dictLimit, newMatchIndex))
                                    matchIndex = newMatchIndex;
                                else {
                                    /* Can only happen if started in the prefix */
                                    assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
                                    matchIndex = dictLimit;
                                }
                            } else {
                                U32 const newMatchIndex = matchCandidateIdx - (U32)backLength;   /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
                                if (!LZ4HC_protectDictEnd(dictLimit, newMatchIndex)) {
                                    assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
                                    matchIndex = dictLimit;
                                } else {
                                    matchIndex = newMatchIndex;
                                    if (lookBackLength==0) {  /* no back possible */
                                        size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
                                        if ((size_t)longest < maxML) {
                                            assert(base + matchIndex < ip);
                                            if (ip - (base+matchIndex) > LZ4_DISTANCE_MAX) break;
                                            assert(maxML < 2 GB);
                                            longest = (int)maxML;
                                            *matchpos = base + matchIndex;   /* virtual pos, relative to ip, to retrieve offset */
                                            *startpos = ip;
                                        }
                                        {   U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
                                            if (distToNextPattern > matchIndex) break;  /* avoid overflow */
                                            matchIndex -= distToNextPattern;
                        }   }   }   }   }
                        continue;
                }   }
        }   }   /* PA optimization */

        /* follow current chain */
        matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos);

    }  /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */

    if ( dict == usingDictCtxHc
      && nbAttempts
      && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
        size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
        U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
        assert(dictEndOffset <= 1 GB);
        matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
        while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
            const BYTE* const matchPtr = dictCtx->base + dictMatchIndex;

            if (LZ4_read32(matchPtr) == pattern) {
                int mlt;
                int back = 0;
                const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
                if (vLimit > iHighLimit) vLimit = iHighLimit;
                mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
                back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->base + dictCtx->dictLimit) : 0;
                mlt -= back;
                if (mlt > longest) {
                    longest = mlt;
                    *matchpos = base + matchIndex + back;
                    *startpos = ip + back;
            }   }

            {   U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
                dictMatchIndex -= nextOffset;
                matchIndex -= nextOffset;
    }   }   }

    return longest;
}

LZ4_FORCE_INLINE
int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4,   /* Index table will be updated */
                                 const BYTE* const ip, const BYTE* const iLimit,
                                 const BYTE** matchpos,
                                 const int maxNbAttempts,
                                 const int patternAnalysis,
                                 const dictCtx_directive dict)
{
    const BYTE* uselessPtr = ip;
    /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
     * but this won't be the case here, as we define iLowLimit==ip,
     * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
    return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio);
}

/* LZ4HC_encodeSequence() :
 * @return : 0 if ok,
 *           1 if buffer issue detected */
LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
    const BYTE** ip,
    BYTE** op,
    const BYTE** anchor,
    int matchLength,
    const BYTE* const match,
    limitedOutput_directive limit,
    BYTE* oend)
{
    size_t length;
    BYTE* const token = (*op)++;

#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
    static const BYTE* start = NULL;
    static U32 totalCost = 0;
    U32 const pos = (start==NULL) ? 0 : (U32)(*anchor - start);
    U32 const ll = (U32)(*ip - *anchor);
    U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
    U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
    U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
    if (start==NULL) start = *anchor;  /* only works for single segment */
    /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
    DEBUGLOG(6, "pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u",
                pos,
                (U32)(*ip - *anchor), matchLength, (U32)(*ip-match),
                cost, totalCost);
    totalCost += cost;
#endif

    /* Encode Literal length */
    length = (size_t)(*ip - *anchor);
    if ((limit) && ((*op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) return 1;   /* Check output limit */
    if (length >= RUN_MASK) {
        size_t len = length - RUN_MASK;
        *token = (RUN_MASK << ML_BITS);
        for(; len >= 255 ; len -= 255) *(*op)++ = 255;
        *(*op)++ = (BYTE)len;
    } else {
        *token = (BYTE)(length << ML_BITS);
    }

    /* Copy Literals */
    LZ4_wildCopy8(*op, *anchor, (*op) + length);
    *op += length;

    /* Encode Offset */
    assert( (*ip - match) <= LZ4_DISTANCE_MAX );   /* note : consider providing offset as a value, rather than as a pointer difference */
    LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2;

    /* Encode MatchLength */
    assert(matchLength >= MINMATCH);
    length = (size_t)matchLength - MINMATCH;
    if ((limit) && (*op + (length / 255) + (1 + LASTLITERALS) > oend)) return 1;   /* Check output limit */
    if (length >= ML_MASK) {
        *token += ML_MASK;
        length -= ML_MASK;
        for(; length >= 510 ; length -= 510) { *(*op)++ = 255; *(*op)++ = 255; }
        if (length >= 255) { length -= 255; *(*op)++ = 255; }
        *(*op)++ = (BYTE)length;
    } else {
        *token += (BYTE)(length);
    }

    /* Prepare next loop */
    *ip += matchLength;
    *anchor = *ip;

    return 0;
}
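/* example : an LZ4 sequence is laid out as : token (literal length in the
 * high 4 bits, matchLength-MINMATCH in the low 4 bits), optional
 * length-extension bytes of 255, the literals, a 2-byte little-endian
 * offset, then optional match-length extension bytes. Encoding 5 literals
 * followed by an 8-byte match at distance 32 emits : token 0x54,
 * 5 literal bytes, offset bytes 0x20 0x00. */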

LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
    LZ4HC_CCtx_internal* const ctx,
    const char* const source,
    char* const dest,
    int* srcSizePtr,
    int const maxOutputSize,
    unsigned maxNbAttempts,
    const limitedOutput_directive limit,
    const dictCtx_directive dict
    )
{
    const int inputSize = *srcSizePtr;
    const int patternAnalysis = (maxNbAttempts > 128);   /* levels 9+ */

    const BYTE* ip = (const BYTE*) source;
    const BYTE* anchor = ip;
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = (iend - LASTLITERALS);

    BYTE* optr = (BYTE*) dest;
    BYTE* op = (BYTE*) dest;
    BYTE* oend = op + maxOutputSize;

    int   ml0, ml, ml2, ml3;
    const BYTE* start0;
    const BYTE* ref0;
    const BYTE* ref = NULL;
    const BYTE* start2 = NULL;
    const BYTE* ref2 = NULL;
    const BYTE* start3 = NULL;
    const BYTE* ref3 = NULL;

    /* init */
    *srcSizePtr = 0;
    if (limit == fillOutput) oend -= LASTLITERALS;                  /* Hack to support the LZ4 format restriction */
    if (inputSize < LZ4_minLength) goto _last_literals;             /* Input too small, no compression (all literals) */

    /* Main Loop */
    while (ip <= mflimit) {
        ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict);
        if (ml<MINMATCH) { ip++; continue; }

        /* saved, in case we would skip too much */
        start0 = ip; ref0 = ref; ml0 = ml;

_Search2:
        if (ip+ml <= mflimit) {
            ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
                            ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2,
                            maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
        } else {
            ml2 = ml;
        }

        if (ml2 == ml) { /* No better match => encode ML1 */
            optr = op;
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
            continue;
        }

        if (start0 < ip) {   /* first match was skipped at least once */
            if (start2 < ip + ml0) {  /* squeezing ML1 between ML0(original ML1) and ML2 */
                ip = start0; ref = ref0; ml = ml0;  /* restore initial ML1 */
        }   }

        /* Here, start0==ip */
        if ((start2 - ip) < 3) {  /* First Match too small : removed */
            ml = ml2;
            ip = start2;
            ref = ref2;
            goto _Search2;
        }

_Search3:
        /* At this stage, we have :
        *  ml2 > ml1, and
        *  ip1+3 <= ip2 (usually < ip1+ml1) */
        if ((start2 - ip) < OPTIMAL_ML) {
            int correction;
            int new_ml = ml;
            if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
            if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
            correction = new_ml - (int)(start2 - ip);
            if (correction > 0) {
                start2 += correction;
                ref2 += correction;
                ml2 -= correction;
            }
        }
        /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */

        if (start2 + ml2 <= mflimit) {
            ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
                            start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3,
                            maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
        } else {
            ml3 = ml2;
        }

        if (ml3 == ml2) {  /* No better match => encode ML1 and ML2 */
            /* ip & ref are known; Now for ml */
            if (start2 < ip+ml)  ml = (int)(start2 - ip);
            /* Now, encode 2 sequences */
            optr = op;
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
            ip = start2;
            optr = op;
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) goto _dest_overflow;
            continue;
        }

        if (start3 < ip+ml+3) {  /* Not enough space for match 2 : remove it */
            if (start3 >= (ip+ml)) {  /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
                if (start2 < ip+ml) {
                    int correction = (int)(ip+ml - start2);
                    start2 += correction;
                    ref2 += correction;
                    ml2 -= correction;
                    if (ml2 < MINMATCH) {
                        start2 = start3;
                        ref2 = ref3;
                        ml2 = ml3;
                    }
                }

                optr = op;
                if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
                ip  = start3;
                ref = ref3;
                ml  = ml3;

                start0 = start2;
                ref0 = ref2;
                ml0 = ml2;
                goto _Search2;
            }

            start2 = start3;
            ref2 = ref3;
            ml2 = ml3;
            goto _Search3;
        }

        /*
        * OK, now we have 3 ascending matches;
        * let's write the first one ML1.
        * ip & ref are known; Now decide ml.
        */
        if (start2 < ip+ml) {
            if ((start2 - ip) < OPTIMAL_ML) {
                int correction;
                if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
                if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
                correction = ml - (int)(start2 - ip);
                if (correction > 0) {
                    start2 += correction;
                    ref2 += correction;
                    ml2 -= correction;
                }
            } else {
                ml = (int)(start2 - ip);
            }
        }
        optr = op;
        if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;

        /* ML2 becomes ML1 */
        ip = start2; ref = ref2; ml = ml2;

        /* ML3 becomes ML2 */
        start2 = start3; ref2 = ref3; ml2 = ml3;

        /* let's find a new ML3 */
        goto _Search3;
    }

_last_literals:
    /* Encode Last Literals */
    {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
        size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
        size_t const totalSize = 1 + litLength + lastRunSize;
        if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
        if (limit && (op + totalSize > oend)) {
            if (limit == limitedOutput) return 0;  /* Check output limit */
            /* adapt lastRunSize to fill 'dest' */
            lastRunSize  = (size_t)(oend - op) - 1;
            litLength = (lastRunSize + 255 - RUN_MASK) / 255;
            lastRunSize -= litLength;
        }
        ip = anchor + lastRunSize;

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = (RUN_MASK << ML_BITS);
            for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRunSize << ML_BITS);
        }
        memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End */
    *srcSizePtr = (int) (((const char*)ip) - source);
    return (int) (((char*)op)-dest);

_dest_overflow:
    if (limit == fillOutput) {
        op = optr;  /* restore correct out pointer */
        goto _last_literals;
    }
    return 0;
}


static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
    const char* const source, char* dst,
    int* srcSizePtr, int dstCapacity,
    int const nbSearches, size_t sufficient_len,
    const limitedOutput_directive limit, int const fullUpdate,
    const dictCtx_directive dict,
    HCfavor_e favorDecSpeed);


LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
    LZ4HC_CCtx_internal* const ctx,
    const char* const src,
    char* const dst,
    int* const srcSizePtr,
    int const dstCapacity,
    int cLevel,
    const limitedOutput_directive limit,
    const dictCtx_directive dict
    )
{
    typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
    typedef struct {
        lz4hc_strat_e strat;
        U32 nbSearches;
        U32 targetLength;
    } cParams_t;
    static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
        { lz4hc,     2, 16 },  /* 0, unused */
        { lz4hc,     2, 16 },  /* 1, unused */
        { lz4hc,     2, 16 },  /* 2, unused */
        { lz4hc,     4, 16 },  /* 3 */
        { lz4hc,     8, 16 },  /* 4 */
        { lz4hc,    16, 16 },  /* 5 */
        { lz4hc,    32, 16 },  /* 6 */
        { lz4hc,    64, 16 },  /* 7 */
        { lz4hc,   128, 16 },  /* 8 */
        { lz4hc,   256, 16 },  /* 9 */
        { lz4opt,   96, 64 },  /*10==LZ4HC_CLEVEL_OPT_MIN*/
        { lz4opt,  512,128 },  /*11 */
        { lz4opt,16384,LZ4_OPT_NUM },  /* 12==LZ4HC_CLEVEL_MAX */
    };
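    /* note : levels 3-9 use the hash-chain parser, doubling nbSearches at
     * each level; levels 10-12 (LZ4HC_CLEVEL_OPT_MIN and above) switch to
     * the optimal parser, with level 12 (LZ4HC_CLEVEL_MAX) raising
     * targetLength to LZ4_OPT_NUM and enabling the "ultra" full-update
     * mode passed below. */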

    DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d)", ctx, src, *srcSizePtr);

    if (limit == fillOutput && dstCapacity < 1) return 0;   /* Impossible to store anything */
    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;    /* Unsupported input size (too large or negative) */

    ctx->end += *srcSizePtr;
    if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT;   /* note : convention is different from lz4frame, maybe something to review */
    cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
    {   cParams_t const cParam = clTable[cLevel];
        HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
        int result;

        if (cParam.strat == lz4hc) {
            result = LZ4HC_compress_hashChain(ctx,
                                src, dst, srcSizePtr, dstCapacity,
                                cParam.nbSearches, limit, dict);
        } else {
            assert(cParam.strat == lz4opt);
            result = LZ4HC_compress_optimal(ctx,
                                src, dst, srcSizePtr, dstCapacity,
                                (int)cParam.nbSearches, cParam.targetLength, limit,
                                cLevel == LZ4HC_CLEVEL_MAX,   /* ultra mode */
                                dict, favor);
        }
        if (result <= 0) ctx->dirty = 1;
        return result;
    }
}

static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);

static int
LZ4HC_compress_generic_noDictCtx (
        LZ4HC_CCtx_internal* const ctx,
        const char* const src,
        char* const dst,
        int* const srcSizePtr,
        int const dstCapacity,
        int cLevel,
        limitedOutput_directive limit
        )
{
    assert(ctx->dictCtx == NULL);
    return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
}

static int
LZ4HC_compress_generic_dictCtx (
        LZ4HC_CCtx_internal* const ctx,
        const char* const src,
        char* const dst,
        int* const srcSizePtr,
        int const dstCapacity,
        int cLevel,
        limitedOutput_directive limit
        )
{
    const size_t position = (size_t)(ctx->end - ctx->base) - ctx->lowLimit;
    assert(ctx->dictCtx != NULL);
    if (position >= 64 KB) {
        ctx->dictCtx = NULL;
        return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
    } else if (position == 0 && *srcSizePtr > 4 KB) {
        memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
        LZ4HC_setExternalDict(ctx, (const BYTE *)src);
        ctx->compressionLevel = (short)cLevel;
        return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
    } else {
        return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc);
    }
}

static int
LZ4HC_compress_generic (
        LZ4HC_CCtx_internal* const ctx,
        const char* const src,
        char* const dst,
        int* const srcSizePtr,
        int const dstCapacity,
        int cLevel,
        limitedOutput_directive limit
        )
{
    if (ctx->dictCtx == NULL) {
        return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
    } else {
        return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
    }
}


int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }

#ifndef _MSC_VER  /* for some reason, Visual fails the alignment test on 32-bit x86 :
                   * it reports an alignment of 8 bytes,
                   * while actually aligning LZ4_streamHC_t on 4 bytes. */
static size_t LZ4_streamHC_t_alignment(void)
{
    struct { char c; LZ4_streamHC_t t; } t_a;
    return sizeof(t_a) - sizeof(t_a.t);
}
#endif

/* state is presumed correctly initialized,
 * in which case its size and alignment have already been validated */
int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
    LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
#ifndef _MSC_VER  /* for some reason, Visual fails the alignment test on 32-bit x86 :
                   * it reports an alignment of 8 bytes,
                   * while actually aligning LZ4_streamHC_t on 4 bytes. */
    assert(((size_t)state & (LZ4_streamHC_t_alignment() - 1)) == 0);  /* check alignment */
#endif
    if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0;   /* Error : state is not aligned for pointers (32 or 64 bits) */
    LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
    LZ4HC_init_internal (ctx, (const BYTE*)src);
    if (dstCapacity < LZ4_compressBound(srcSize))
        return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
    else
        return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited);
}

int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
    LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
    if (ctx==NULL) return 0;   /* init failure */
    return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
}

int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
    LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
#else
    LZ4_streamHC_t state;
    LZ4_streamHC_t* const statePtr = &state;
#endif
    int const cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
    FREEMEM(statePtr);
#endif
    return cSize;
}

/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
{
    LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
    if (ctx==NULL) return 0;   /* init failure */
    LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source);
    LZ4_setCompressionLevel(ctx, cLevel);
    return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput);
}



/**************************************
*  Streaming Functions
**************************************/
/* allocation */
LZ4_streamHC_t* LZ4_createStreamHC(void)
{
    LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
    if (LZ4_streamHCPtr==NULL) return NULL;
    LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));  /* full initialization, malloc'ed buffer can be full of garbage */
    return LZ4_streamHCPtr;
}

int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
{
    DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
    if (!LZ4_streamHCPtr) return 0;  /* support free on NULL */
    FREEMEM(LZ4_streamHCPtr);
    return 0;
}


LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
{
    LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
    if (buffer == NULL) return NULL;
    if (size < sizeof(LZ4_streamHC_t)) return NULL;
#ifndef _MSC_VER  /* for some reason, Visual fails the alignment test on 32-bit x86 :
                   * it reports an alignment of 8 bytes,
                   * while actually aligning LZ4_streamHC_t on 4 bytes. */
    if (((size_t)buffer) & (LZ4_streamHC_t_alignment() - 1)) return NULL;  /* alignment check */
#endif
    /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
    LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
    DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", LZ4_streamHCPtr, (unsigned)size);
    /* end-base will trigger a clearTable on starting compression */
    LZ4_streamHCPtr->internal_donotuse.end = (const BYTE *)(ptrdiff_t)-1;
    LZ4_streamHCPtr->internal_donotuse.base = NULL;
    LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
    LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = 0;
    LZ4_streamHCPtr->internal_donotuse.dirty = 0;
    LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
    return LZ4_streamHCPtr;
}

/* just a stub */
void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
    LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}

void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
    if (LZ4_streamHCPtr->internal_donotuse.dirty) {
        LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
    } else {
        /* preserve end - base : can trigger clearTable's threshold */
        LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.base;
        LZ4_streamHCPtr->internal_donotuse.base = NULL;
        LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
    }
    LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}

void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel);
    if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT;
    if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;
    LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel;
}

void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor)
{
    LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0);
}

/* LZ4_loadDictHC() :
 * LZ4_streamHCPtr is presumed properly initialized */
int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
              const char* dictionary, int dictSize)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(4, "LZ4_loadDictHC(%p, %p, %d)", LZ4_streamHCPtr, dictionary, dictSize);
    assert(LZ4_streamHCPtr != NULL);
    if (dictSize > 64 KB) {
        dictionary += (size_t)dictSize - 64 KB;
        dictSize = 64 KB;
    }
    /* need a full initialization, there are bad side-effects when using resetFast() */
    {   int const cLevel = ctxPtr->compressionLevel;
        LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
        LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
    }
    LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary);
    ctxPtr->end = (const BYTE*)dictionary + dictSize;
    if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
    return dictSize;
}

void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) {
    working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? &(dictionary_stream->internal_donotuse) : NULL;
}

/* compression */

static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
{
    DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
    if (ctxPtr->end >= ctxPtr->base + ctxPtr->dictLimit + 4)
        LZ4HC_Insert (ctxPtr, ctxPtr->end-3);   /* Referencing remaining dictionary content */

    /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
    ctxPtr->lowLimit  = ctxPtr->dictLimit;
    ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
    ctxPtr->dictBase  = ctxPtr->base;
    ctxPtr->base = newBlock - ctxPtr->dictLimit;
    ctxPtr->end  = newBlock;
    ctxPtr->nextToUpdate = ctxPtr->dictLimit;   /* match referencing will resume from there */

    /* cannot reference an extDict and a dictCtx at the same time */
    ctxPtr->dictCtx = NULL;
}
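/* note : the swap above turns the previous prefix into the (single) extDict
 * segment : dictLimit now marks the end of the old block within the index
 * space, and base is rebased so that newBlock continues the same index
 * range contiguously. */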

static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
                                            const char* src, char* dst,
                                            int* srcSizePtr, int dstCapacity,
                                            limitedOutput_directive limit)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(4, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d)",
                LZ4_streamHCPtr, src, *srcSizePtr);
    assert(ctxPtr != NULL);
    /* auto-init if forgotten */
    if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);

    /* Check overflow */
    if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
        size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
        if (dictSize > 64 KB) dictSize = 64 KB;
        LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
    }

    /* Check if blocks follow each other */
    if ((const BYTE*)src != ctxPtr->end)
        LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);

    /* Check overlapping input/dictionary space */
    {   const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
        const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
        const BYTE* const dictEnd   = ctxPtr->dictBase + ctxPtr->dictLimit;
        if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
            if (sourceEnd > dictEnd) sourceEnd = dictEnd;
            ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
            if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
        }
    }

    return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
}

int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
{
    if (dstCapacity < LZ4_compressBound(srcSize))
        return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
    else
        return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited);
}

int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
{
    return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput);
}



/* dictionary saving */

int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
    LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
    int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
    DEBUGLOG(4, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
    if (dictSize > 64 KB) dictSize = 64 KB;
    if (dictSize < 4) dictSize = 0;
    if (dictSize > prefixSize) dictSize = prefixSize;
    memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
    {   U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
        streamPtr->end = (const BYTE*)safeBuffer + dictSize;
        streamPtr->base = streamPtr->end - endIndex;
        streamPtr->dictLimit = endIndex - (U32)dictSize;
        streamPtr->lowLimit = endIndex - (U32)dictSize;
        if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit;
    }
    return dictSize;
}
1144
+
1145
+
1146
+/***************************************************
1147
+*  Deprecated Functions
1148
+***************************************************/
1149
+
1150
+/* These functions currently generate deprecation warnings */
1151
+
1152
+/* Wrappers for deprecated compression functions */
1153
+int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
1154
+int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
1155
+int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
1156
+int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
1157
+int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
1158
+int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
1159
+int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
1160
+int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
1161
+int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
1162
+int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
1163
+
1164
+
1165
+/* Deprecated streaming functions */
1166
+int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; }
1167
+
1168
+/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
1169
+ * @return : 0 on success, !=0 if error */
1170
+int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
1171
+{
1172
+    LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
1173
+    if (hc4 == NULL) return 1;   /* init failed */
1174
+    LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
1175
+    return 0;
1176
+}
1177
+
1178
+void* LZ4_createHC (const char* inputBuffer)
1179
+{
1180
+    LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
1181
+    if (hc4 == NULL) return NULL;   /* not enough memory */
1182
+    LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
1183
+    return hc4;
1184
+}
1185
+
+int LZ4_freeHC (void* LZ4HC_Data)
+{
+    if (!LZ4HC_Data) return 0;  /* support free on NULL */
+    FREEMEM(LZ4HC_Data);
+    return 0;
+}
+
+int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
+{
+    return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
+}
+
+int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
+{
+    return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
+}
+
+char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
+{
+    LZ4_streamHC_t *ctx = (LZ4_streamHC_t*)LZ4HC_Data;
+    const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit;
+    LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
+    /* avoid const char * -> char * conversion warning :( */
+    return (char *)(uptrval)bufferStart;
+}
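+
+/* note : LZ4_slideInputBufferHC() belongs to the legacy streaming API : it hands
+ * back the start of the data still tracked by the window and resets the stream,
+ * leaving buffer management entirely to the caller. New code should prefer
+ * LZ4_compress_HC_continue() with explicit double or ring buffers. */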
+
+
+/* ================================================
+ *  LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX])
+ * ===============================================*/
+typedef struct {
+    int price;
+    int off;
+    int mlen;
+    int litlen;
+} LZ4HC_optimal_t;
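+
+/* opt[pos] holds the cheapest solution found so far that ends `pos` bytes past
+ * the current anchor : `price` is its total cost in bytes, `mlen` and `off`
+ * describe the last step (mlen==1 means the position is reached by a literal),
+ * and `litlen` counts the literals accumulated since the last match. */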
+
+/* price in bytes */
+LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
+{
+    int price = litlen;
+    assert(litlen >= 0);
+    if (litlen >= (int)RUN_MASK)
+        price += 1 + ((litlen-(int)RUN_MASK) / 255);
+    return price;
+}
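+
+/* Worked example, assuming the usual RUN_MASK==15 : a run of 14 literals costs
+ * exactly 14 bytes, while a run of 20 literals costs 20 + 1 = 21 bytes, because
+ * the token's 4-bit run field saturates at 15 and one extra length byte is
+ * emitted (plus one more byte per additional 255 literals). */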
+
+
+/* requires mlen >= MINMATCH */
+LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
+{
+    int price = 1 + 2;   /* token + 16-bit offset */
+    assert(litlen >= 0);
+    assert(mlen >= MINMATCH);
+
+    price += LZ4HC_literalsPrice(litlen);
+
+    if (mlen >= (int)(ML_MASK+MINMATCH))
+        price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255);
+
+    return price;
+}
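+
+/* Worked example, with the usual MINMATCH==4 and ML_MASK==15 : a 16-byte match
+ * preceded by no literals costs 1 (token) + 2 (offset) = 3 bytes, since
+ * 16 < ML_MASK+MINMATCH == 19; a 40-byte match costs 3 + 1 extra length
+ * byte = 4 bytes. */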
+
+
+typedef struct {
+    int off;
+    int len;
+} LZ4HC_match_t;
+
+LZ4_FORCE_INLINE LZ4HC_match_t
+LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
+                      const BYTE* ip, const BYTE* const iHighLimit,
+                      int minLen, int nbSearches,
+                      const dictCtx_directive dict,
+                      const HCfavor_e favorDecSpeed)
+{
+    LZ4HC_match_t match = { 0 , 0 };
+    const BYTE* matchPtr = NULL;
+    /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+     * but this won't be the case here, as we define iLowLimit==ip,
+     * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+    int matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &matchPtr, &ip, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
+    if (matchLength <= minLen) return match;
+    if (favorDecSpeed) {
+        if ((matchLength>18) & (matchLength<=36)) matchLength=18;   /* favor shortcut */
+    }
+    match.len = matchLength;
+    match.off = (int)(ip-matchPtr);
+    return match;
+}
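+
+/* note on the favorDecSpeed branch above : a match of length 18 (MINMATCH + 14)
+ * still fits the token's 4-bit match-length field, so truncating 19..36-byte
+ * matches down to 18 avoids an extra length byte and keeps the decoder on its
+ * shortcut fast path, at a small compression-ratio cost. */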
+
+
+static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
+                                    const char* const source,
+                                    char* dst,
+                                    int* srcSizePtr,
+                                    int dstCapacity,
+                                    int const nbSearches,
+                                    size_t sufficient_len,
+                                    const limitedOutput_directive limit,
+                                    int const fullUpdate,
+                                    const dictCtx_directive dict,
+                                    const HCfavor_e favorDecSpeed)
+{
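+    /* High-level sketch : for each anchor, gather candidate matches and fill
+     * opt[] forward so that every reachable position keeps its cheapest
+     * encoding; once no match can push the horizon further, the best path
+     * through opt[] is replayed to emit the chosen sequences. */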
+#define TRAILING_LITERALS 3
+    LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS];   /* ~64 KB, which is a bit large for stack... */
+
+    const BYTE* ip = (const BYTE*) source;
+    const BYTE* anchor = ip;
+    const BYTE* const iend = ip + *srcSizePtr;
+    const BYTE* const mflimit = iend - MFLIMIT;
+    const BYTE* const matchlimit = iend - LASTLITERALS;
+    BYTE* op = (BYTE*) dst;
+    BYTE* opSaved = (BYTE*) dst;
+    BYTE* oend = op + dstCapacity;
+
+    /* init */
+    DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
+    *srcSizePtr = 0;
+    if (limit == fillOutput) oend -= LASTLITERALS;   /* Hack to respect the LZ4 format's last-literals restriction */
+    if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;
+    /* Main Loop */
+    assert(ip - anchor < LZ4_MAX_INPUT_SIZE);
+    while (ip <= mflimit) {
+         int const llen = (int)(ip - anchor);
+         int best_mlen, best_off;
+         int cur, last_match_pos = 0;
+
+         LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
+         if (firstMatch.len==0) { ip++; continue; }
+
+         if ((size_t)firstMatch.len > sufficient_len) {
+             /* good enough solution : immediate encoding */
+             int const firstML = firstMatch.len;
+             const BYTE* const matchPos = ip - firstMatch.off;
+             opSaved = op;
+             if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) )   /* updates ip, op and anchor */
+                 goto _dest_overflow;
+             continue;
+         }
+
+         /* set prices for first positions (literals) */
+         {   int rPos;
+             for (rPos = 0 ; rPos < MINMATCH ; rPos++) {
+                 int const cost = LZ4HC_literalsPrice(llen + rPos);
+                 opt[rPos].mlen = 1;
+                 opt[rPos].off = 0;
+                 opt[rPos].litlen = llen + rPos;
+                 opt[rPos].price = cost;
+                 DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+                             rPos, cost, opt[rPos].litlen);
+         }   }
+         /* set prices using initial match */
+         {   int mlen = MINMATCH;
+             int const matchML = firstMatch.len;   /* necessarily < sufficient_len < LZ4_OPT_NUM */
+             int const offset = firstMatch.off;
+             assert(matchML < LZ4_OPT_NUM);
+             for ( ; mlen <= matchML ; mlen++) {
+                 int const cost = LZ4HC_sequencePrice(llen, mlen);
+                 opt[mlen].mlen = mlen;
+                 opt[mlen].off = offset;
+                 opt[mlen].litlen = llen;
+                 opt[mlen].price = cost;
+                 DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
+                             mlen, cost, mlen);
+         }   }
+         last_match_pos = firstMatch.len;
+         {   int addLit;
+             for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
+                 opt[last_match_pos+addLit].mlen = 1; /* literal */
+                 opt[last_match_pos+addLit].off = 0;
+                 opt[last_match_pos+addLit].litlen = addLit;
+                 opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
+                 DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+                             last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
+         }   }
+
+         /* check further positions */
+         for (cur = 1; cur < last_match_pos; cur++) {
+             const BYTE* const curPtr = ip + cur;
+             LZ4HC_match_t newMatch;
+
+             if (curPtr > mflimit) break;
+             DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u",
+                     cur, opt[cur].price, opt[cur+1].price, cur+1);
+             if (fullUpdate) {
+                 /* not useful to search here if next position has same (or lower) cost */
+                 if ( (opt[cur+1].price <= opt[cur].price)
+                   /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
+                   && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) )
+                     continue;
+             } else {
+                 /* not useful to search here if next position has same (or lower) cost */
+                 if (opt[cur+1].price <= opt[cur].price) continue;
+             }
+             }