PageRenderTime 59ms CodeModel.GetById 17ms RepoModel.GetById 1ms app.codeStats 0ms

/src/FreeImage/Source/ZLib/deflate.c

https://bitbucket.org/cabalistic/ogredeps/
C | 1965 lines | 1315 code | 205 blank | 445 comment | 536 complexity | 21d892bcde4a7430d70e8cf4a5c35a69 MD5 | raw file
Possible License(s): LGPL-3.0, BSD-3-Clause, CPL-1.0, Unlicense, GPL-2.0, GPL-3.0, LGPL-2.0, MPL-2.0-no-copyleft-exception, BSD-2-Clause, LGPL-2.1
  1. /* deflate.c -- compress data using the deflation algorithm
  2. * Copyright (C) 1995-2012 Jean-loup Gailly and Mark Adler
  3. * For conditions of distribution and use, see copyright notice in zlib.h
  4. */
  5. /*
  6. * ALGORITHM
  7. *
  8. * The "deflation" process depends on being able to identify portions
  9. * of the input text which are identical to earlier input (within a
  10. * sliding window trailing behind the input currently being processed).
  11. *
  12. * The most straightforward technique turns out to be the fastest for
  13. * most input files: try all possible matches and select the longest.
  14. * The key feature of this algorithm is that insertions into the string
  15. * dictionary are very simple and thus fast, and deletions are avoided
  16. * completely. Insertions are performed at each input character, whereas
  17. * string matches are performed only when the previous match ends. So it
  18. * is preferable to spend more time in matches to allow very fast string
  19. * insertions and avoid deletions. The matching algorithm for small
  20. * strings is inspired from that of Rabin & Karp. A brute force approach
  21. * is used to find longer strings when a small match has been found.
  22. * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
  23. * (by Leonid Broukhis).
  24. * A previous version of this file used a more sophisticated algorithm
  25. * (by Fiala and Greene) which is guaranteed to run in linear amortized
  26. * time, but has a larger average cost, uses more memory and is patented.
  27. * However the F&G algorithm may be faster for some highly redundant
  28. * files if the parameter max_chain_length (described below) is too large.
  29. *
  30. * ACKNOWLEDGEMENTS
  31. *
  32. * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
  33. * I found it in 'freeze' written by Leonid Broukhis.
  34. * Thanks to many people for bug reports and testing.
  35. *
  36. * REFERENCES
  37. *
  38. * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
  39. * Available in http://tools.ietf.org/html/rfc1951
  40. *
  41. * A description of the Rabin and Karp algorithm is given in the book
  42. * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
  43. *
  44. * Fiala,E.R., and Greene,D.H.
  45. * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595
  46. *
  47. */
  48. /* @(#) $Id: deflate.c,v 1.8 2012/02/05 18:10:25 drolon Exp $ */
#include "deflate.h"

/* Attribution string deliberately embedded in any binary linking this code. */
const char deflate_copyright[] =
   " deflate 1.2.6 Copyright 1995-2012 Jean-loup Gailly and Mark Adler ";
/*
  If you use the zlib library in a product, an acknowledgment is welcome
  in the documentation of your product. If for some reason you cannot
  include such an acknowledgment, I would appreciate that you keep this
  copyright string in the executable of your product.
 */
/* ===========================================================================
 * Function prototypes.
 */
typedef enum {
    need_more,      /* block not completed, need more input or more output */
    block_done,     /* block flush performed */
    finish_started, /* finish started, need only more output at next deflate */
    finish_done     /* finish done, accept no more input or output */
} block_state;

typedef block_state (*compress_func) OF((deflate_state *s, int flush));
/* Compression function. Returns the block state after the call. */

local void fill_window    OF((deflate_state *s));
local block_state deflate_stored OF((deflate_state *s, int flush));
local block_state deflate_fast   OF((deflate_state *s, int flush));
#ifndef FASTEST
local block_state deflate_slow   OF((deflate_state *s, int flush));
#endif
local block_state deflate_rle    OF((deflate_state *s, int flush));
local block_state deflate_huff   OF((deflate_state *s, int flush));
local void lm_init        OF((deflate_state *s));
local void putShortMSB    OF((deflate_state *s, uInt b));
local void flush_pending  OF((z_streamp strm));
local int read_buf        OF((z_streamp strm, Bytef *buf, unsigned size));
#ifdef ASMV
      /* longest_match may be provided in assembly; match_init sets it up */
      void match_init OF((void)); /* asm code initialization */
      uInt longest_match  OF((deflate_state *s, IPos cur_match));
#else
local uInt longest_match  OF((deflate_state *s, IPos cur_match));
#endif
#ifdef DEBUG
local  void check_match OF((deflate_state *s, IPos start, IPos match,
                            int length));
#endif
/* ===========================================================================
 * Local data
 */

#define NIL 0
/* Tail of hash chains */

#ifndef TOO_FAR
# define TOO_FAR 4096
#endif
/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */

/* Values for max_lazy_match, good_match and max_chain_length, depending on
 * the desired pack level (0..9). The values given below have been tuned to
 * exclude worst case performance for pathological files. Better values may be
 * found for specific files.
 */
typedef struct config_s {
   ush good_length; /* reduce lazy search above this match length */
   ush max_lazy;    /* do not perform lazy search above this match length */
   ush nice_length; /* quit search above this match length */
   ush max_chain;   /* maximum number of hash-chain links to follow */
   compress_func func; /* compression routine used at this level */
} config;

#ifdef FASTEST
local const config configuration_table[2] = {
/*      good lazy nice chain */
/* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
/* 1 */ {4,    4,  8,    4, deflate_fast}};   /* max speed, no lazy matches */
#else
local const config configuration_table[10] = {
/*      good lazy nice chain */
/* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
/* 1 */ {4,    4,  8,    4, deflate_fast},    /* max speed, no lazy matches */
/* 2 */ {4,    5, 16,    8, deflate_fast},
/* 3 */ {4,    6, 32,   32, deflate_fast},
/* 4 */ {4,    4, 16,   16, deflate_slow},    /* lazy matches */
/* 5 */ {8,   16, 32,   32, deflate_slow},
/* 6 */ {8,   16, 128, 128, deflate_slow},
/* 7 */ {8,   32, 128, 256, deflate_slow},
/* 8 */ {32, 128, 258, 1024, deflate_slow},
/* 9 */ {32, 258, 258, 4096, deflate_slow}};  /* max compression */
#endif

/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
 * meaning.
 */

#define EQUAL 0
/* result of memcmp for equal strings */

#ifndef NO_DUMMY_DECL
struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
#endif

/* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */
#define RANK(f) (((f) << 1) - ((f) > 4 ? 9 : 0))
/* ===========================================================================
 * Update a hash value with the given input byte
 * IN  assertion: all calls to UPDATE_HASH are made with consecutive
 *    input characters, so that a running hash key can be computed from the
 *    previous key instead of complete recalculation each time.
 */
#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)

/* ===========================================================================
 * Insert string str in the dictionary and set match_head to the previous head
 * of the hash chain (the most recent string with same hash key). Return
 * the previous length of the hash chain.
 * If this file is compiled with -DFASTEST, the compression level is forced
 * to 1, and no hash chains are maintained.
 * IN  assertion: all calls to INSERT_STRING are made with consecutive
 *    input characters and the first MIN_MATCH bytes of str are valid
 *    (except for the last MIN_MATCH-1 bytes of the input file).
 */
#ifdef FASTEST
#define INSERT_STRING(s, str, match_head) \
   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
    match_head = s->head[s->ins_h], \
    s->head[s->ins_h] = (Pos)(str))
#else
#define INSERT_STRING(s, str, match_head) \
   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
    match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
    s->head[s->ins_h] = (Pos)(str))
#endif

/* ===========================================================================
 * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
 * prev[] will be initialized on the fly.
 */
#define CLEAR_HASH(s) \
    s->head[s->hash_size-1] = NIL; \
    zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
  177. /* ========================================================================= */
  178. int ZEXPORT deflateInit_(strm, level, version, stream_size)
  179. z_streamp strm;
  180. int level;
  181. const char *version;
  182. int stream_size;
  183. {
  184. return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
  185. Z_DEFAULT_STRATEGY, version, stream_size);
  186. /* To do: ignore strm->next_in if we use it as window */
  187. }
/* ========================================================================= */
/* Full-control initializer: validates all parameters, selects the wrapper
 * (raw / zlib / gzip) from the sign and magnitude of windowBits, allocates
 * the window, hash tables and pending buffer, then resets the stream.
 */
int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
                  version, stream_size)
    z_streamp strm;
    int  level;
    int  method;
    int  windowBits;
    int  memLevel;
    int  strategy;
    const char *version;
    int stream_size;
{
    deflate_state *s;
    int wrap = 1;           /* 1 = zlib wrapper, 0 = raw, 2 = gzip */
    static const char my_version[] = ZLIB_VERSION;

    ushf *overlay;
    /* We overlay pending_buf and d_buf+l_buf. This works since the average
     * output size for (length,distance) codes is <= 24 bits.
     */

    /* reject callers compiled against an incompatible zlib.h */
    if (version == Z_NULL || version[0] != my_version[0] ||
        stream_size != sizeof(z_stream)) {
        return Z_VERSION_ERROR;
    }
    if (strm == Z_NULL) return Z_STREAM_ERROR;

    strm->msg = Z_NULL;
    /* supply default allocators when the caller provided none */
    if (strm->zalloc == (alloc_func)0) {
#ifdef Z_SOLO
        return Z_STREAM_ERROR;
#else
        strm->zalloc = zcalloc;
        strm->opaque = (voidpf)0;
#endif
    }
    if (strm->zfree == (free_func)0)
#ifdef Z_SOLO
        return Z_STREAM_ERROR;
#else
        strm->zfree = zcfree;
#endif

#ifdef FASTEST
    if (level != 0) level = 1;
#else
    if (level == Z_DEFAULT_COMPRESSION) level = 6;
#endif

    if (windowBits < 0) { /* suppress zlib wrapper */
        wrap = 0;
        windowBits = -windowBits;
    }
#ifdef GZIP
    else if (windowBits > 15) {
        wrap = 2;       /* write gzip wrapper instead */
        windowBits -= 16;
    }
#endif
    if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
        windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
        strategy < 0 || strategy > Z_FIXED) {
        return Z_STREAM_ERROR;
    }
    if (windowBits == 8) windowBits = 9;  /* until 256-byte window bug fixed */
    s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
    if (s == Z_NULL) return Z_MEM_ERROR;
    strm->state = (struct internal_state FAR *)s;
    s->strm = strm;

    s->wrap = wrap;
    s->gzhead = Z_NULL;
    s->w_bits = windowBits;
    s->w_size = 1 << s->w_bits;
    s->w_mask = s->w_size - 1;

    s->hash_bits = memLevel + 7;
    s->hash_size = 1 << s->hash_bits;
    s->hash_mask = s->hash_size - 1;
    s->hash_shift =  ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);

    /* window is 2*w_size so a whole window of history can slide */
    s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
    s->prev   = (Posf *)  ZALLOC(strm, s->w_size, sizeof(Pos));
    s->head   = (Posf *)  ZALLOC(strm, s->hash_size, sizeof(Pos));

    s->high_water = 0;      /* nothing written to s->window yet */

    s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */

    overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
    s->pending_buf = (uchf *) overlay;
    s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);

    /* any allocation failure aborts the whole initialization */
    if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
        s->pending_buf == Z_NULL) {
        s->status = FINISH_STATE;
        strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
        deflateEnd (strm);
        return Z_MEM_ERROR;
    }
    /* carve the distance and literal/length buffers out of the overlay */
    s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
    s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;

    s->level = level;
    s->strategy = strategy;
    s->method = (Byte)method;

    return deflateReset(strm);
}
/* ========================================================================= */
/* Preload the sliding window (and hash chains) with a user dictionary so
 * that early matches can reference it. Only legal before any compression
 * has taken place; for a zlib wrapper the dictionary's Adler-32 is
 * recorded so inflate can verify the same dictionary is used.
 */
int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
    z_streamp strm;
    const Bytef *dictionary;
    uInt  dictLength;
{
    deflate_state *s;
    uInt str, n;
    int wrap;
    unsigned avail;
    unsigned char *next;

    if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL)
        return Z_STREAM_ERROR;
    s = strm->state;
    wrap = s->wrap;
    /* not allowed for gzip streams, after the zlib header was written,
     * or once input has already been buffered
     */
    if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead)
        return Z_STREAM_ERROR;

    /* when using zlib wrappers, compute Adler-32 for provided dictionary */
    if (wrap == 1)
        strm->adler = adler32(strm->adler, dictionary, dictLength);
    s->wrap = 0;                    /* avoid computing Adler-32 in read_buf */

    /* if dictionary would fill window, just replace the history */
    if (dictLength >= s->w_size) {
        if (wrap == 0) {            /* already empty otherwise */
            CLEAR_HASH(s);
            s->strstart = 0;
            s->block_start = 0L;
            s->insert = 0;
        }
        dictionary += dictLength - s->w_size;  /* use the tail */
        dictLength = s->w_size;
    }

    /* insert dictionary into window and hash; temporarily borrow the
     * stream's input pointers to feed fill_window()
     */
    avail = strm->avail_in;
    next = strm->next_in;
    strm->avail_in = dictLength;
    strm->next_in = (Bytef *)dictionary;
    fill_window(s);
    while (s->lookahead >= MIN_MATCH) {
        str = s->strstart;
        n = s->lookahead - (MIN_MATCH-1);
        do {
            /* hash every position with a full MIN_MATCH of lookahead,
             * exactly as the compressor itself would
             */
            UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
#ifndef FASTEST
            s->prev[str & s->w_mask] = s->head[s->ins_h];
#endif
            s->head[s->ins_h] = (Pos)str;
            str++;
        } while (--n);
        s->strstart = str;
        s->lookahead = MIN_MATCH-1;
        fill_window(s);
    }
    s->strstart += s->lookahead;
    s->block_start = (long)s->strstart;
    s->insert = s->lookahead;
    s->lookahead = 0;
    s->match_length = s->prev_length = MIN_MATCH-1;
    s->match_available = 0;
    /* restore the caller's input state and the wrapper mode */
    strm->next_in = next;
    strm->avail_in = avail;
    s->wrap = wrap;
    return Z_OK;
}
  347. /* ========================================================================= */
  348. int ZEXPORT deflateResetKeep (strm)
  349. z_streamp strm;
  350. {
  351. deflate_state *s;
  352. if (strm == Z_NULL || strm->state == Z_NULL ||
  353. strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) {
  354. return Z_STREAM_ERROR;
  355. }
  356. strm->total_in = strm->total_out = 0;
  357. strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
  358. strm->data_type = Z_UNKNOWN;
  359. s = (deflate_state *)strm->state;
  360. s->pending = 0;
  361. s->pending_out = s->pending_buf;
  362. if (s->wrap < 0) {
  363. s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
  364. }
  365. s->status = s->wrap ? INIT_STATE : BUSY_STATE;
  366. strm->adler =
  367. #ifdef GZIP
  368. s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
  369. #endif
  370. adler32(0L, Z_NULL, 0);
  371. s->last_flush = Z_NO_FLUSH;
  372. _tr_init(s);
  373. return Z_OK;
  374. }
  375. /* ========================================================================= */
  376. int ZEXPORT deflateReset (strm)
  377. z_streamp strm;
  378. {
  379. int ret;
  380. ret = deflateResetKeep(strm);
  381. if (ret == Z_OK)
  382. lm_init(strm->state);
  383. return ret;
  384. }
  385. /* ========================================================================= */
  386. int ZEXPORT deflateSetHeader (strm, head)
  387. z_streamp strm;
  388. gz_headerp head;
  389. {
  390. if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
  391. if (strm->state->wrap != 2) return Z_STREAM_ERROR;
  392. strm->state->gzhead = head;
  393. return Z_OK;
  394. }
  395. /* ========================================================================= */
  396. int ZEXPORT deflatePending (strm, pending, bits)
  397. unsigned *pending;
  398. int *bits;
  399. z_streamp strm;
  400. {
  401. if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
  402. if (pending != Z_NULL)
  403. *pending = strm->state->pending;
  404. if (bits != Z_NULL)
  405. *bits = strm->state->bi_valid;
  406. return Z_OK;
  407. }
/* ========================================================================= */
/* Insert bits into the output stream ahead of normal deflate output,
 * least-significant bits of value first. Used e.g. to splice streams.
 */
int ZEXPORT deflatePrime (strm, bits, value)
    z_streamp strm;
    int bits;
    int value;
{
    deflate_state *s;
    int put;

    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
    s = strm->state;
    /* make sure flushing the bit buffer cannot run into the symbol
     * buffers (d_buf/l_buf) that overlay the end of pending_buf
     */
    if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))
        return Z_BUF_ERROR;
    do {
        put = Buf_size - s->bi_valid;   /* free bit positions in bi_buf */
        if (put > bits)
            put = bits;
        /* append the low 'put' bits of value above the valid bits */
        s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid);
        s->bi_valid += put;
        _tr_flush_bits(s);              /* move complete bytes to pending_buf */
        value >>= put;
        bits -= put;
    } while (bits);
    return Z_OK;
}
/* ========================================================================= */
/* Change the compression level and/or strategy mid-stream. If the change
 * would switch compression functions (or strategy) after data has been
 * consumed, the currently buffered block is flushed first so it is coded
 * with the old parameters.
 */
int ZEXPORT deflateParams(strm, level, strategy)
    z_streamp strm;
    int level;
    int strategy;
{
    deflate_state *s;
    compress_func func;
    int err = Z_OK;

    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
    s = strm->state;

#ifdef FASTEST
    if (level != 0) level = 1;
#else
    if (level == Z_DEFAULT_COMPRESSION) level = 6;
#endif
    if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
        return Z_STREAM_ERROR;
    }
    func = configuration_table[s->level].func;

    if ((strategy != s->strategy || func != configuration_table[level].func) &&
        strm->total_in != 0) {
        /* Flush the last buffer: */
        err = deflate(strm, Z_BLOCK);
    }
    if (s->level != level) {
        s->level = level;
        /* adopt the tuning parameters of the new level */
        s->max_lazy_match   = configuration_table[level].max_lazy;
        s->good_match       = configuration_table[level].good_length;
        s->nice_match       = configuration_table[level].nice_length;
        s->max_chain_length = configuration_table[level].max_chain;
    }
    s->strategy = strategy;
    return err;
}
  467. /* ========================================================================= */
  468. int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
  469. z_streamp strm;
  470. int good_length;
  471. int max_lazy;
  472. int nice_length;
  473. int max_chain;
  474. {
  475. deflate_state *s;
  476. if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
  477. s = strm->state;
  478. s->good_match = good_length;
  479. s->max_lazy_match = max_lazy;
  480. s->nice_match = nice_length;
  481. s->max_chain_length = max_chain;
  482. return Z_OK;
  483. }
/* =========================================================================
 * For the default windowBits of 15 and memLevel of 8, this function returns
 * a close to exact, as well as small, upper bound on the compressed size.
 * They are coded as constants here for a reason--if the #define's are
 * changed, then this function needs to be changed as well.  The return
 * value for 15 and 8 only works for those exact settings.
 *
 * For any setting other than those defaults for windowBits and memLevel,
 * the value returned is a conservative worst case for the maximum expansion
 * resulting from using fixed blocks instead of stored blocks, which deflate
 * can emit on compressed data for some combinations of the parameters.
 *
 * This function could be more sophisticated to provide closer upper bounds for
 * every combination of windowBits and memLevel.  But even the conservative
 * upper bound of about 14% expansion does not seem onerous for output buffer
 * allocation.
 */
uLong ZEXPORT deflateBound(strm, sourceLen)
    z_streamp strm;
    uLong sourceLen;
{
    deflate_state *s;
    uLong complen, wraplen;
    Bytef *str;

    /* conservative upper bound for compressed data */
    complen = sourceLen +
              ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;

    /* if can't get parameters, return conservative bound plus zlib wrapper */
    if (strm == Z_NULL || strm->state == Z_NULL)
        return complen + 6;

    /* compute wrapper length */
    s = strm->state;
    switch (s->wrap) {
    case 0:                                 /* raw deflate */
        wraplen = 0;
        break;
    case 1:                                 /* zlib wrapper */
        wraplen = 6 + (s->strstart ? 4 : 0);
        break;
    case 2:                                 /* gzip wrapper */
        wraplen = 18;
        if (s->gzhead != Z_NULL) {          /* user-supplied gzip header */
            if (s->gzhead->extra != Z_NULL)
                wraplen += 2 + s->gzhead->extra_len;
            str = s->gzhead->name;
            if (str != Z_NULL)
                do {
                    wraplen++;              /* name bytes plus NUL */
                } while (*str++);
            str = s->gzhead->comment;
            if (str != Z_NULL)
                do {
                    wraplen++;              /* comment bytes plus NUL */
                } while (*str++);
            if (s->gzhead->hcrc)
                wraplen += 2;
        }
        break;
    default:                                /* for compiler happiness */
        wraplen = 6;
    }

    /* if not default parameters, return conservative bound */
    if (s->w_bits != 15 || s->hash_bits != 8 + 7)
        return complen + wraplen;

    /* default settings: return tight bound for that case */
    return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
           (sourceLen >> 25) + 13 - 6 + wraplen;
}
  552. /* =========================================================================
  553. * Put a short in the pending buffer. The 16-bit value is put in MSB order.
  554. * IN assertion: the stream state is correct and there is enough room in
  555. * pending_buf.
  556. */
  557. local void putShortMSB (s, b)
  558. deflate_state *s;
  559. uInt b;
  560. {
  561. put_byte(s, (Byte)(b >> 8));
  562. put_byte(s, (Byte)(b & 0xff));
  563. }
  564. /* =========================================================================
  565. * Flush as much pending output as possible. All deflate() output goes
  566. * through this function so some applications may wish to modify it
  567. * to avoid allocating a large strm->next_out buffer and copying into it.
  568. * (See also read_buf()).
  569. */
  570. local void flush_pending(strm)
  571. z_streamp strm;
  572. {
  573. unsigned len;
  574. deflate_state *s = strm->state;
  575. _tr_flush_bits(s);
  576. len = s->pending;
  577. if (len > strm->avail_out) len = strm->avail_out;
  578. if (len == 0) return;
  579. zmemcpy(strm->next_out, s->pending_out, len);
  580. strm->next_out += len;
  581. s->pending_out += len;
  582. strm->total_out += len;
  583. strm->avail_out -= len;
  584. s->pending -= len;
  585. if (s->pending == 0) {
  586. s->pending_out = s->pending_buf;
  587. }
  588. }
  589. /* ========================================================================= */
  590. int ZEXPORT deflate (strm, flush)
  591. z_streamp strm;
  592. int flush;
  593. {
  594. int old_flush; /* value of flush param for previous deflate call */
  595. deflate_state *s;
  596. if (strm == Z_NULL || strm->state == Z_NULL ||
  597. flush > Z_BLOCK || flush < 0) {
  598. return Z_STREAM_ERROR;
  599. }
  600. s = strm->state;
  601. if (strm->next_out == Z_NULL ||
  602. (strm->next_in == Z_NULL && strm->avail_in != 0) ||
  603. (s->status == FINISH_STATE && flush != Z_FINISH)) {
  604. ERR_RETURN(strm, Z_STREAM_ERROR);
  605. }
  606. if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
  607. s->strm = strm; /* just in case */
  608. old_flush = s->last_flush;
  609. s->last_flush = flush;
  610. /* Write the header */
  611. if (s->status == INIT_STATE) {
  612. #ifdef GZIP
  613. if (s->wrap == 2) {
  614. strm->adler = crc32(0L, Z_NULL, 0);
  615. put_byte(s, 31);
  616. put_byte(s, 139);
  617. put_byte(s, 8);
  618. if (s->gzhead == Z_NULL) {
  619. put_byte(s, 0);
  620. put_byte(s, 0);
  621. put_byte(s, 0);
  622. put_byte(s, 0);
  623. put_byte(s, 0);
  624. put_byte(s, s->level == 9 ? 2 :
  625. (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
  626. 4 : 0));
  627. put_byte(s, OS_CODE);
  628. s->status = BUSY_STATE;
  629. }
  630. else {
  631. put_byte(s, (s->gzhead->text ? 1 : 0) +
  632. (s->gzhead->hcrc ? 2 : 0) +
  633. (s->gzhead->extra == Z_NULL ? 0 : 4) +
  634. (s->gzhead->name == Z_NULL ? 0 : 8) +
  635. (s->gzhead->comment == Z_NULL ? 0 : 16)
  636. );
  637. put_byte(s, (Byte)(s->gzhead->time & 0xff));
  638. put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
  639. put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
  640. put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
  641. put_byte(s, s->level == 9 ? 2 :
  642. (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
  643. 4 : 0));
  644. put_byte(s, s->gzhead->os & 0xff);
  645. if (s->gzhead->extra != Z_NULL) {
  646. put_byte(s, s->gzhead->extra_len & 0xff);
  647. put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
  648. }
  649. if (s->gzhead->hcrc)
  650. strm->adler = crc32(strm->adler, s->pending_buf,
  651. s->pending);
  652. s->gzindex = 0;
  653. s->status = EXTRA_STATE;
  654. }
  655. }
  656. else
  657. #endif
  658. {
  659. uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
  660. uInt level_flags;
  661. if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
  662. level_flags = 0;
  663. else if (s->level < 6)
  664. level_flags = 1;
  665. else if (s->level == 6)
  666. level_flags = 2;
  667. else
  668. level_flags = 3;
  669. header |= (level_flags << 6);
  670. if (s->strstart != 0) header |= PRESET_DICT;
  671. header += 31 - (header % 31);
  672. s->status = BUSY_STATE;
  673. putShortMSB(s, header);
  674. /* Save the adler32 of the preset dictionary: */
  675. if (s->strstart != 0) {
  676. putShortMSB(s, (uInt)(strm->adler >> 16));
  677. putShortMSB(s, (uInt)(strm->adler & 0xffff));
  678. }
  679. strm->adler = adler32(0L, Z_NULL, 0);
  680. }
  681. }
  682. #ifdef GZIP
  683. if (s->status == EXTRA_STATE) {
  684. if (s->gzhead->extra != Z_NULL) {
  685. uInt beg = s->pending; /* start of bytes to update crc */
  686. while (s->gzindex < (s->gzhead->extra_len & 0xffff)) {
  687. if (s->pending == s->pending_buf_size) {
  688. if (s->gzhead->hcrc && s->pending > beg)
  689. strm->adler = crc32(strm->adler, s->pending_buf + beg,
  690. s->pending - beg);
  691. flush_pending(strm);
  692. beg = s->pending;
  693. if (s->pending == s->pending_buf_size)
  694. break;
  695. }
  696. put_byte(s, s->gzhead->extra[s->gzindex]);
  697. s->gzindex++;
  698. }
  699. if (s->gzhead->hcrc && s->pending > beg)
  700. strm->adler = crc32(strm->adler, s->pending_buf + beg,
  701. s->pending - beg);
  702. if (s->gzindex == s->gzhead->extra_len) {
  703. s->gzindex = 0;
  704. s->status = NAME_STATE;
  705. }
  706. }
  707. else
  708. s->status = NAME_STATE;
  709. }
  710. if (s->status == NAME_STATE) {
  711. if (s->gzhead->name != Z_NULL) {
  712. uInt beg = s->pending; /* start of bytes to update crc */
  713. int val;
  714. do {
  715. if (s->pending == s->pending_buf_size) {
  716. if (s->gzhead->hcrc && s->pending > beg)
  717. strm->adler = crc32(strm->adler, s->pending_buf + beg,
  718. s->pending - beg);
  719. flush_pending(strm);
  720. beg = s->pending;
  721. if (s->pending == s->pending_buf_size) {
  722. val = 1;
  723. break;
  724. }
  725. }
  726. val = s->gzhead->name[s->gzindex++];
  727. put_byte(s, val);
  728. } while (val != 0);
  729. if (s->gzhead->hcrc && s->pending > beg)
  730. strm->adler = crc32(strm->adler, s->pending_buf + beg,
  731. s->pending - beg);
  732. if (val == 0) {
  733. s->gzindex = 0;
  734. s->status = COMMENT_STATE;
  735. }
  736. }
  737. else
  738. s->status = COMMENT_STATE;
  739. }
  740. if (s->status == COMMENT_STATE) {
  741. if (s->gzhead->comment != Z_NULL) {
  742. uInt beg = s->pending; /* start of bytes to update crc */
  743. int val;
  744. do {
  745. if (s->pending == s->pending_buf_size) {
  746. if (s->gzhead->hcrc && s->pending > beg)
  747. strm->adler = crc32(strm->adler, s->pending_buf + beg,
  748. s->pending - beg);
  749. flush_pending(strm);
  750. beg = s->pending;
  751. if (s->pending == s->pending_buf_size) {
  752. val = 1;
  753. break;
  754. }
  755. }
  756. val = s->gzhead->comment[s->gzindex++];
  757. put_byte(s, val);
  758. } while (val != 0);
  759. if (s->gzhead->hcrc && s->pending > beg)
  760. strm->adler = crc32(strm->adler, s->pending_buf + beg,
  761. s->pending - beg);
  762. if (val == 0)
  763. s->status = HCRC_STATE;
  764. }
  765. else
  766. s->status = HCRC_STATE;
  767. }
  768. if (s->status == HCRC_STATE) {
  769. if (s->gzhead->hcrc) {
  770. if (s->pending + 2 > s->pending_buf_size)
  771. flush_pending(strm);
  772. if (s->pending + 2 <= s->pending_buf_size) {
  773. put_byte(s, (Byte)(strm->adler & 0xff));
  774. put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
  775. strm->adler = crc32(0L, Z_NULL, 0);
  776. s->status = BUSY_STATE;
  777. }
  778. }
  779. else
  780. s->status = BUSY_STATE;
  781. }
  782. #endif
  783. /* Flush as much pending output as possible */
  784. if (s->pending != 0) {
  785. flush_pending(strm);
  786. if (strm->avail_out == 0) {
  787. /* Since avail_out is 0, deflate will be called again with
  788. * more output space, but possibly with both pending and
  789. * avail_in equal to zero. There won't be anything to do,
  790. * but this is not an error situation so make sure we
  791. * return OK instead of BUF_ERROR at next call of deflate:
  792. */
  793. s->last_flush = -1;
  794. return Z_OK;
  795. }
  796. /* Make sure there is something to do and avoid duplicate consecutive
  797. * flushes. For repeated and useless calls with Z_FINISH, we keep
  798. * returning Z_STREAM_END instead of Z_BUF_ERROR.
  799. */
  800. } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) &&
  801. flush != Z_FINISH) {
  802. ERR_RETURN(strm, Z_BUF_ERROR);
  803. }
  804. /* User must not provide more input after the first FINISH: */
  805. if (s->status == FINISH_STATE && strm->avail_in != 0) {
  806. ERR_RETURN(strm, Z_BUF_ERROR);
  807. }
  808. /* Start a new block or continue the current one.
  809. */
  810. if (strm->avail_in != 0 || s->lookahead != 0 ||
  811. (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
  812. block_state bstate;
  813. bstate = s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
  814. (s->strategy == Z_RLE ? deflate_rle(s, flush) :
  815. (*(configuration_table[s->level].func))(s, flush));
  816. if (bstate == finish_started || bstate == finish_done) {
  817. s->status = FINISH_STATE;
  818. }
  819. if (bstate == need_more || bstate == finish_started) {
  820. if (strm->avail_out == 0) {
  821. s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
  822. }
  823. return Z_OK;
  824. /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
  825. * of deflate should use the same flush parameter to make sure
  826. * that the flush is complete. So we don't have to output an
  827. * empty block here, this will be done at next call. This also
  828. * ensures that for a very small output buffer, we emit at most
  829. * one empty block.
  830. */
  831. }
  832. if (bstate == block_done) {
  833. if (flush == Z_PARTIAL_FLUSH) {
  834. _tr_align(s);
  835. } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
  836. _tr_stored_block(s, (char*)0, 0L, 0);
  837. /* For a full flush, this empty block will be recognized
  838. * as a special marker by inflate_sync().
  839. */
  840. if (flush == Z_FULL_FLUSH) {
  841. CLEAR_HASH(s); /* forget history */
  842. if (s->lookahead == 0) {
  843. s->strstart = 0;
  844. s->block_start = 0L;
  845. s->insert = 0;
  846. }
  847. }
  848. }
  849. flush_pending(strm);
  850. if (strm->avail_out == 0) {
  851. s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
  852. return Z_OK;
  853. }
  854. }
  855. }
  856. Assert(strm->avail_out > 0, "bug2");
  857. if (flush != Z_FINISH) return Z_OK;
  858. if (s->wrap <= 0) return Z_STREAM_END;
  859. /* Write the trailer */
  860. #ifdef GZIP
  861. if (s->wrap == 2) {
  862. put_byte(s, (Byte)(strm->adler & 0xff));
  863. put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
  864. put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
  865. put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
  866. put_byte(s, (Byte)(strm->total_in & 0xff));
  867. put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
  868. put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
  869. put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
  870. }
  871. else
  872. #endif
  873. {
  874. putShortMSB(s, (uInt)(strm->adler >> 16));
  875. putShortMSB(s, (uInt)(strm->adler & 0xffff));
  876. }
  877. flush_pending(strm);
  878. /* If avail_out is zero, the application will call deflate again
  879. * to flush the rest.
  880. */
  881. if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
  882. return s->pending != 0 ? Z_OK : Z_STREAM_END;
  883. }
  884. /* ========================================================================= */
  885. int ZEXPORT deflateEnd (strm)
  886. z_streamp strm;
  887. {
  888. int status;
  889. if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
  890. status = strm->state->status;
  891. if (status != INIT_STATE &&
  892. status != EXTRA_STATE &&
  893. status != NAME_STATE &&
  894. status != COMMENT_STATE &&
  895. status != HCRC_STATE &&
  896. status != BUSY_STATE &&
  897. status != FINISH_STATE) {
  898. return Z_STREAM_ERROR;
  899. }
  900. /* Deallocate in reverse order of allocations: */
  901. TRY_FREE(strm, strm->state->pending_buf);
  902. TRY_FREE(strm, strm->state->head);
  903. TRY_FREE(strm, strm->state->prev);
  904. TRY_FREE(strm, strm->state->window);
  905. ZFREE(strm, strm->state);
  906. strm->state = Z_NULL;
  907. return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
  908. }
/* =========================================================================
 * Copy the source state to the destination state.
 * To simplify the source, this is not supported for 16-bit MSDOS (which
 * doesn't have enough memory anyway to duplicate compression states).
 */
int ZEXPORT deflateCopy (dest, source)
    z_streamp dest;
    z_streamp source;
{
#ifdef MAXSEG_64K
    return Z_STREAM_ERROR;
#else
    deflate_state *ds;
    deflate_state *ss;
    ushf *overlay;


    if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) {
        return Z_STREAM_ERROR;
    }

    ss = source->state;

    /* Shallow-copy the public stream header first; the internal state
     * pointer is replaced below with a freshly allocated deep copy.
     */
    zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));

    ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
    if (ds == Z_NULL) return Z_MEM_ERROR;
    dest->state = (struct internal_state FAR *) ds;
    zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state));
    ds->strm = dest;

    /* Allocate the four buffers with the same sizes as deflateInit2 does.
     * pending_buf shares storage with d_buf/l_buf via the "overlay" trick.
     */
    ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
    ds->prev   = (Posf *)  ZALLOC(dest, ds->w_size, sizeof(Pos));
    ds->head   = (Posf *)  ZALLOC(dest, ds->hash_size, sizeof(Pos));
    overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
    ds->pending_buf = (uchf *) overlay;

    if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
        ds->pending_buf == Z_NULL) {
        /* deflateEnd tolerates partially allocated state (TRY_FREE). */
        deflateEnd (dest);
        return Z_MEM_ERROR;
    }
    /* following zmemcpy do not work for 16-bit MSDOS */
    zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
    zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
    zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
    zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);

    /* Rebase the internal pointers so they refer to the new buffers,
     * preserving the same offsets they had in the source state.
     */
    ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
    ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
    ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;

    ds->l_desc.dyn_tree = ds->dyn_ltree;
    ds->d_desc.dyn_tree = ds->dyn_dtree;
    ds->bl_desc.dyn_tree = ds->bl_tree;

    return Z_OK;
#endif /* MAXSEG_64K */
}
  958. /* ===========================================================================
  959. * Read a new buffer from the current input stream, update the adler32
  960. * and total number of bytes read. All deflate() input goes through
  961. * this function so some applications may wish to modify it to avoid
  962. * allocating a large strm->next_in buffer and copying from it.
  963. * (See also flush_pending()).
  964. */
  965. local int read_buf(strm, buf, size)
  966. z_streamp strm;
  967. Bytef *buf;
  968. unsigned size;
  969. {
  970. unsigned len = strm->avail_in;
  971. if (len > size) len = size;
  972. if (len == 0) return 0;
  973. strm->avail_in -= len;
  974. zmemcpy(buf, strm->next_in, len);
  975. if (strm->state->wrap == 1) {
  976. strm->adler = adler32(strm->adler, buf, len);
  977. }
  978. #ifdef GZIP
  979. else if (strm->state->wrap == 2) {
  980. strm->adler = crc32(strm->adler, buf, len);
  981. }
  982. #endif
  983. strm->next_in += len;
  984. strm->total_in += len;
  985. return (int)len;
  986. }
  987. /* ===========================================================================
  988. * Initialize the "longest match" routines for a new zlib stream
  989. */
  990. local void lm_init (s)
  991. deflate_state *s;
  992. {
  993. s->window_size = (ulg)2L*s->w_size;
  994. CLEAR_HASH(s);
  995. /* Set the default configuration parameters:
  996. */
  997. s->max_lazy_match = configuration_table[s->level].max_lazy;
  998. s->good_match = configuration_table[s->level].good_length;
  999. s->nice_match = configuration_table[s->level].nice_length;
  1000. s->max_chain_length = configuration_table[s->level].max_chain;
  1001. s->strstart = 0;
  1002. s->block_start = 0L;
  1003. s->lookahead = 0;
  1004. s->insert = 0;
  1005. s->match_length = s->prev_length = MIN_MATCH-1;
  1006. s->match_available = 0;
  1007. s->ins_h = 0;
  1008. #ifndef FASTEST
  1009. #ifdef ASMV
  1010. match_init(); /* initialize the asm code */
  1011. #endif
  1012. #endif
  1013. }
#ifndef FASTEST

/* ===========================================================================
 * Set match_start to the longest match starting at the given string and
 * return its length. Matches shorter or equal to prev_length are discarded,
 * in which case the result is equal to prev_length and match_start is
 * garbage.
 * IN assertions: cur_match is the head of the hash chain for the current
 *   string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
 * OUT assertion: the match length is not greater than s->lookahead.
 */
#ifndef ASMV
/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
 * match.S. The code will be functionally equivalent.
 */
local uInt longest_match(s, cur_match)
    deflate_state *s;
    IPos cur_match;                             /* current match */
{
    unsigned chain_length = s->max_chain_length;/* max hash chain length */
    register Bytef *scan = s->window + s->strstart; /* current string */
    register Bytef *match;                      /* matched string */
    register int len;                           /* length of current match */
    int best_len = s->prev_length;              /* best match length so far */
    int nice_match = s->nice_match;             /* stop if match long enough */
    IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
        s->strstart - (IPos)MAX_DIST(s) : NIL;
    /* Stop when cur_match becomes <= limit. To simplify the code,
     * we prevent matches with the string of window index 0.
     */
    Posf *prev = s->prev;
    uInt wmask = s->w_mask;

#ifdef UNALIGNED_OK
    /* Compare two bytes at a time. Note: this is not always beneficial.
     * Try with and without -DUNALIGNED_OK to check.
     */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
    register ush scan_start = *(ushf*)scan;
    register ush scan_end   = *(ushf*)(scan+best_len-1);
#else
    /* scan_end1/scan_end cache the bytes a longer-than-best match must
     * reproduce at positions best_len-1 and best_len; comparing them first
     * rejects most chain entries with two byte loads.
     */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH;
    register Byte scan_end1  = scan[best_len-1];
    register Byte scan_end   = scan[best_len];
#endif

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    /* Do not waste too much time if we already have a good match: */
    if (s->prev_length >= s->good_match) {
        chain_length >>= 2;
    }
    /* Do not look for matches beyond the end of the input. This is necessary
     * to make deflate deterministic.
     */
    if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    do {
        Assert(cur_match < s->strstart, "no future");
        match = s->window + cur_match;

        /* Skip to next match if the match length cannot increase
         * or if the match length is less than 2.  Note that the checks below
         * for insufficient lookahead only occur occasionally for performance
         * reasons.  Therefore uninitialized memory will be accessed, and
         * conditional jumps will be made that depend on those values.
         * However the length of the match is limited to the lookahead, so
         * the output of deflate is not affected by the uninitialized values.
         */
#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
        /* This code assumes sizeof(unsigned short) == 2. Do not use
         * UNALIGNED_OK if your compiler uses a different size.
         */
        if (*(ushf*)(match+best_len-1) != scan_end ||
            *(ushf*)match != scan_start) continue;

        /* It is not necessary to compare scan[2] and match[2] since they are
         * always equal when the other bytes match, given that the hash keys
         * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
         * strstart+3, +5, ... up to strstart+257. We check for insufficient
         * lookahead only every 4th comparison; the 128th check will be made
         * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
         * necessary to put more guard bytes at the end of the window, or
         * to check more often for insufficient lookahead.
         */
        Assert(scan[2] == match[2], "scan[2]?");
        scan++, match++;
        do {
        } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 scan < strend);
        /* The funny "do {}" generates better code on most compilers */

        /* Here, scan <= window+strstart+257 */
        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
        if (*scan == *match) scan++;

        len = (MAX_MATCH - 1) - (int)(strend-scan);
        scan = strend - (MAX_MATCH-1);

#else /* UNALIGNED_OK */

        if (match[best_len]   != scan_end  ||
            match[best_len-1] != scan_end1 ||
            *match            != *scan     ||
            *++match          != scan[1])      continue;

        /* The check at best_len-1 can be removed because it will be made
         * again later. (This heuristic is not always a win.)
         * It is not necessary to compare scan[2] and match[2] since they
         * are always equal when the other bytes match, given that
         * the hash keys are equal and that HASH_BITS >= 8.
         */
        scan += 2, match++;
        Assert(*scan == *match, "match[2]?");

        /* We check for insufficient lookahead only every 8th comparison;
         * the 256th check will be made at strstart+258.
         */
        do {
        } while (*++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 scan < strend);

        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

        len = MAX_MATCH - (int)(strend - scan);
        scan = strend - MAX_MATCH;

#endif /* UNALIGNED_OK */

        if (len > best_len) {
            s->match_start = cur_match;
            best_len = len;
            if (len >= nice_match) break;
#ifdef UNALIGNED_OK
            scan_end = *(ushf*)(scan+best_len-1);
#else
            scan_end1  = scan[best_len-1];
            scan_end   = scan[best_len];
#endif
        }
        /* Walk the hash chain backwards; prev[] entries <= limit are out of
         * reach of MAX_DIST, and chain_length caps the work per call.
         */
    } while ((cur_match = prev[cur_match & wmask]) > limit
             && --chain_length != 0);

    /* Clamp the result so the OUT assertion (len <= lookahead) holds. */
    if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
    return s->lookahead;
}
#endif /* ASMV */
#else /* FASTEST */

/* ---------------------------------------------------------------------------
 * Optimized version for FASTEST only
 *
 * Only the single hash-chain head is examined (no chain walk), and only a
 * match strictly longer than MIN_MATCH-1 is reported.
 */
local uInt longest_match(s, cur_match)
    deflate_state *s;
    IPos cur_match;                             /* current match */
{
    register Bytef *scan = s->window + s->strstart; /* current string */
    register Bytef *match;                       /* matched string */
    register int len;                            /* length of current match */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH;

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    Assert(cur_match < s->strstart, "no future");

    match = s->window + cur_match;

    /* Return failure if the match length is less than 2:
     */
    if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;

    /* The check at best_len-1 can be removed because it will be made
     * again later. (This heuristic is not always a win.)
     * It is not necessary to compare scan[2] and match[2] since they
     * are always equal when the other bytes match, given that
     * the hash keys are equal and that HASH_BITS >= 8.
     */
    scan += 2, match += 2;
    Assert(*scan == *match, "match[2]?");

    /* We check for insufficient lookahead only every 8th comparison;
     * the 256th check will be made at strstart+258.
     */
    do {
    } while (*++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             scan < strend);

    Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

    len = MAX_MATCH - (int)(strend - scan);

    if (len < MIN_MATCH) return MIN_MATCH - 1;

    s->match_start = cur_match;
    /* Clamp to lookahead so the caller never scans past valid input. */
    return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
}

#endif /* FASTEST */
#ifdef DEBUG
/* ===========================================================================
 * Check that the match at match_start is indeed a match.
 * Debug-only sanity check: aborts via z_error() if the two window regions
 * differ; at verbosity > 1 it also dumps the matched text to stderr.
 */
local void check_match(s, start, match, length)
    deflate_state *s;
    IPos start, match;
    int length;
{
    /* check that the match is indeed a match */
    if (zmemcmp(s->window + match,
                s->window + start, length) != EQUAL) {
        fprintf(stderr, " start %u, match %u, length %d\n",
                start, match, length);
        do {
            fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
        } while (--length != 0);
        z_error("invalid match");
    }
    if (z_verbose > 1) {
        fprintf(stderr,"\\[%d,%d]", start-match, length);
        do { putc(s->window[start++], stderr); } while (--length != 0);
    }
}
#else
/* In release builds the check compiles to nothing. */
#  define check_match(s, start, match, length)
#endif /* DEBUG */
/* ===========================================================================
 * Fill the window when the lookahead becomes insufficient.
 * Updates strstart and lookahead.
 *
 * IN assertion: lookahead < MIN_LOOKAHEAD
 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
 *    At least one byte has been read, or avail_in == 0; reads are
 *    performed for at least two bytes (required for the zip translate_eol
 *    option -- not supported here).
 */
local void fill_window(s)
    deflate_state *s;
{
    register unsigned n, m;
    register Posf *p;
    unsigned more;    /* Amount of free space at the end of the window. */
    uInt wsize = s->w_size;

    Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");

    do {
        more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);

        /* Deal with !@#$% 64K limit: */
        if (sizeof(int) <= 2) {
            if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
                more = wsize;

            } else if (more == (unsigned)(-1)) {
                /* Very unlikely, but possible on 16 bit machine if
                 * strstart == 0 && lookahead == 1 (input done a byte at time)
                 */
                more--;
            }
        }

        /* If the window is almost full and there is insufficient lookahead,
         * move the upper half to the lower one to make room in the upper half.
         */
        if (s->strstart >= wsize+MAX_DIST(s)) {

            zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
            s->match_start -= wsize;
            s->strstart    -= wsize; /* we now have strstart >= MAX_DIST */
            s->block_start -= (long) wsize;

            /* Slide the hash table (could be avoided with 32 bit values
               at the expense of memory usage). We slide even when level == 0
               to keep the hash table consistent if we switch back to level > 0
               later. (Using level 0 permanently is not an optimal usage of
               zlib, so we don't care about this pathological case.)
             */
            /* Every stored position must shift down by wsize; positions
             * that would go negative become NIL (chain terminator).
             */
            n = s->hash_size;
            p = &s->head[n];
            do {
                m = *--p;
                *p = (Pos)(m >= wsize ? m-wsize : NIL);
            } while (--n);

            n = wsize;
#ifndef FASTEST
            p = &s->prev[n];
            do {
                m = *--p;
                *p = (Pos)(m >= wsize ? m-wsize : NIL);
                /* If n is not on any hash chain, prev[n] is garbage but
                 * its value will never be used.
                 */
            } while (--n);
#endif
            more += wsize;
        }
        if (s->strm->avail_in == 0) break;

        /* If there was no sliding:
         *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
         *    more == window_size - lookahead - strstart
         * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
         * => more >= window_size - 2*WSIZE + 2
         * In the BIG_MEM or MMAP case (not yet supported),
         *   window_size == input_size + MIN_LOOKAHEAD  &&
         *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
         * Otherwise, window_size == 2*WSIZE so more >= 2.
         * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
         */
        Assert(more >= 2, "more < 2");

        n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
        s->lookahead += n;

        /* Initialize the hash value now that we have some input: */
        if (s->lookahead + s->insert >= MIN_MATCH) {
            uInt str = s->strstart - s->insert;
            s->ins_h = s->window[str];
            UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
#if MIN_MATCH != 3
            Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
            /* Insert any bytes deferred from the previous call (s->insert)
             * into the hash table now that enough context is available.
             */
            while (s->insert) {
                UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
#ifndef FASTEST
                s->prev[str & s->w_mask] = s->head[s->ins_h];
#endif
                s->head[s->ins_h] = (Pos)str;
                str++;
                s->insert--;
                if (s->lookahead + s->insert < MIN_MATCH)
                    break;
            }
        }
        /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
         * but this is not important since only literal bytes will be emitted.
         */

    } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);

    /* If the WIN_INIT bytes after the end of the current data have never been
     * written, then zero those bytes in order to avoid memory check reports of
     * the use of uninitialized (or uninitialised as Julian writes) bytes by
     * the longest match routines.  Update the high water mark for the next
     * time through here.  WIN_INIT is set to MAX_MATCH since the longest match
     * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
     */
    if (s->high_water < s->window_size) {
        ulg curr = s->strstart + (ulg)(s->lookahead);
        ulg init;

        if (s->high_water < curr) {
            /* Previous high water mark below current data -- zero WIN_INIT
             * bytes or up to end of window, whichever is less.
             */
            init = s->window_size - curr;
            if (init > WIN_INIT)
                init = WIN_INIT;
            zmemzero(s->window + curr, (unsigned)init);
            s->high_water = curr + init;
        }
        else if (s->high_water < (ulg)curr + WIN_INIT) {
            /* High water mark at or above current data, but below current data
             * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
             * to end of window, whichever is less.
             */
            init = (ulg)curr + WIN_INIT - s->high_water;
            if (init > s->window_size - s->high_water)
                init = s->window_size - s->high_water;
            zmemzero(s->window + s->high_water, (unsigned)init);
            s->high_water += init;
        }
    }

    Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
           "not enough room for search");
}
/* ===========================================================================
 * Flush the current block, with given end-of-file flag.
 * IN assertion: strstart is set to the end of the current match.
 */
#define FLUSH_BLOCK_ONLY(s, last) { \
   _tr_flush_block(s, (s->block_start >= 0L ? \
                   (charf *)&s->window[(unsigned)s->block_start] : \
                   (charf *)Z_NULL), \
                (ulg)((long)s->strstart - s->block_start), \
                (last)); \
   s->block_start = s->strstart; \
   flush_pending(s->strm); \
   Tracev((stderr,"[FLUSH]")); \
}

/* Same but force premature exit if necessary.
 * NOTE: this macro can `return` from the function that expands it when the
 * output buffer fills up; callers (deflate_stored/fast/slow/...) rely on
 * that to report need_more / finish_started.
 */
#define FLUSH_BLOCK(s, last) { \
   FLUSH_BLOCK_ONLY(s, last); \
   if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \
}
/* ===========================================================================
 * Copy without compression as much as possible from the input stream, return
 * the current block state.
 * This function does not insert new strings in the dictionary since
 * uncompressible data is probably not useful. This function is used
 * only for the level=0 compression option.
 * NOTE: this function should be optimized to avoid extra copying from
 * window to pending_buf.
 */
local block_state deflate_stored(s, flush)
    deflate_state *s;
    int flush;
{
    /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
     * to pending_buf_size, and each stored block has a 5 byte header:
     */
    ulg max_block_size = 0xffff;
    ulg max_start;

    if (max_block_size > s->pending_buf_size - 5) {
        max_block_size = s->pending_buf_size - 5;
    }

    /* Copy as much as possible from input to output: */
    for (;;) {
        /* Fill the window as much as possible: */
        if (s->lookahead <= 1) {

            Assert(s->strstart < s->w_size+MAX_DIST(s) ||
                   s->block_start >= (long)s->w_size, "slide too late");

            fill_window(s);
            if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;

            if (s->lookahead == 0) break; /* flush the current block */
        }
        Assert(s->block_start >= 0L, "block gone");

        /* Consume the whole lookahead; the bytes already live in the
         * window, a stored block just delimits a span of it.
         */
        s->strstart += s->lookahead;
        s->lookahead = 0;

        /* Emit a stored block if pending_buf will be full: */
        max_start = s->block_start + max_block_size;
        if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
            /* strstart == 0 is possible when wraparound on 16-bit machine */
            s->lookahead = (uInt)(s->strstart - max_start);
            s->strstart = (uInt)max_start;
            FLUSH_BLOCK(s, 0); /* may return need_more if out of output space */
        }
        /* Flush if we may have to slide, otherwise block_start may become
         * negative and the data will be gone:
         */
        if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
            FLUSH_BLOCK(s, 0);
        }
    }
    s->insert = 0;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1); /* may return finish_started */
        return finish_done;
    }
    if ((long)s->strstart > s->block_start)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
/* ===========================================================================
 * Compress as much as possible from the input stream, return the current
 * block state.
 * This function does not perform lazy evaluation of matches and inserts
 * new strings in the dictionary only for unmatched strings or for short
 * matches. It is used only for the fast compression options.
 */
local block_state deflate_fast(s, flush)
    deflate_state *s;
    int flush;
{
    IPos hash_head;       /* head of the hash chain */
    int bflush;           /* set if current block must be flushed */

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         * At this point we have always match_length < MIN_MATCH
         */
        if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match (s, hash_head);
            /* longest_match() sets match_start */
        }
        if (s->match_length >= MIN_MATCH) {
            check_match(s, s->strstart, s->match_start, s->match_length);

            /* Emit a (distance, length) pair; bflush is set when the
             * literal/length buffer fills and the block must be flushed.
             */
            _tr_tally_dist(s, s->strstart - s->match_start,
                           s->match_length - MIN_MATCH, bflush);

            s->lookahead -= s->match_length;

            /* Insert new strings in the hash table only if the match length
             * is not too large. This saves time but degrades compression.
             */
#ifndef FASTEST
            if (s->match_length <= s->max_insert_length &&
                s->lookahead >= MIN_MATCH) {
                s->match_length--; /* string at strstart already in table */
                do {
                    s->strstart++;
                    INSERT_STRING(s, s->strstart, hash_head);
                    /* strstart never exceeds WSIZE-MAX_MATCH, so there are
                     * always MIN_MATCH bytes ahead.
                     */
                } while (--s->match_length != 0);
                s->strstart++;
            } else
#endif
            {
                s->strstart += s->match_length;
                s->match_length = 0;
                s->ins_h = s->window[s->strstart];
                UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
                Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
                /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
                 * matter since it will be recomputed at next deflate call.
                 */
            }
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit (s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        if (bflush) FLUSH_BLOCK(s, 0); /* may return need_more */
    }
    /* Remember how many trailing bytes (< MIN_MATCH) still need to be
     * inserted into the hash table on the next call (see fill_window).
     */
    s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1); /* may return finish_started */
        return finish_done;
    }
    if (s->last_lit)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
  1537. #ifndef FASTEST
  1538. /* ===========================================================================
  1539. * Same as above, but achieves better compression. We use a lazy
  1540. * evaluation for matches: a match is finally adopted only if there is
  1541. * no better match at the next window position.
  1542. */
local block_state deflate_slow(s, flush)
    deflate_state *s;
    int flush;
{
    IPos hash_head;          /* head of hash chain */
    int bflush;              /* set if current block must be flushed */

    /* Process the input block. */
    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         * prev_length/prev_match hold the match found at the previous
         * position; the lazy evaluation decides between the two below.
         */
        s->prev_length = s->match_length, s->prev_match = s->match_start;
        s->match_length = MIN_MATCH-1;

        if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
            s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match (s, hash_head);
            /* longest_match() sets match_start */

            /* Discard short matches that are unlikely to pay off:
             * always in Z_FILTERED mode, or a minimal match that is
             * too distant (a 3-byte match beyond TOO_FAR costs more
             * bits to encode than three literals).
             */
            if (s->match_length <= 5 && (s->strategy == Z_FILTERED
#if TOO_FAR <= 32767
                || (s->match_length == MIN_MATCH &&
                    s->strstart - s->match_start > TOO_FAR)
#endif
                )) {

                /* If prev_match is also MIN_MATCH, match_start is garbage
                 * but we will ignore the current match anyway.
                 */
                s->match_length = MIN_MATCH-1;
            }
        }
        /* If there was a match at the previous step and the current
         * match is not better, output the previous match:
         */
        if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
            uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
            /* Do not insert strings in hash table beyond this. */

            check_match(s, s->strstart-1, s->prev_match, s->prev_length);

            /* The previous match started one byte back, hence the -1
             * in the emitted distance. _tr_tally_dist sets bflush.
             */
            _tr_tally_dist(s, s->strstart -1 - s->prev_match,
                           s->prev_length - MIN_MATCH, bflush);

            /* Insert in hash table all strings up to the end of the match.
             * strstart-1 and strstart are already inserted. If there is not
             * enough lookahead, the last two strings are not inserted in
             * the hash table.
             */
            s->lookahead -= s->prev_length-1;
            s->prev_length -= 2;
            do {
                if (++s->strstart <= max_insert) {
                    INSERT_STRING(s, s->strstart, hash_head);
                }
            } while (--s->prev_length != 0);
            s->match_available = 0;
            s->match_length = MIN_MATCH-1;
            s->strstart++;

            /* FLUSH_BLOCK can return from this function when the
             * output buffer fills (see the macro definition).
             */
            if (bflush) FLUSH_BLOCK(s, 0);

        } else if (s->match_available) {
            /* If there was no match at the previous position, output a
             * single literal. If there was a match but the current match
             * is longer, truncate the previous match to a single literal.
             */
            Tracevv((stderr,"%c", s->window[s->strstart-1]));
            _tr_tally_lit(s, s->window[s->strstart-1], bflush);
            if (bflush) {
                FLUSH_BLOCK_ONLY(s, 0);
            }
            s->strstart++;
            s->lookahead--;
            /* Explicit buffer-full check here because FLUSH_BLOCK_ONLY,
             * unlike FLUSH_BLOCK, does not return on its own.
             */
            if (s->strm->avail_out == 0) return need_more;
        } else {
            /* There is no previous match to compare with, wait for
             * the next step to decide.
             */
            s->match_available = 1;
            s->strstart++;
            s->lookahead--;
        }
    }
    Assert (flush != Z_NO_FLUSH, "no flush?");
    /* Flush the last buffered literal, if any, before closing the block. */
    if (s->match_available) {
        Tracevv((stderr,"%c", s->window[s->strstart-1]));
        _tr_tally_lit(s, s->window[s->strstart-1], bflush);
        s->match_available = 0;
    }
    /* Bytes at the window end still to be hashed on the next call
     * (at most MIN_MATCH-1 of them).
     */
    s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->last_lit)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
  1656. #endif /* FASTEST */
  1657. /* ===========================================================================
  1658. * For Z_RLE, simply look for runs of bytes, generate matches only of distance
  1659. * one. Do not maintain a hash table. (It will be regenerated if this run of
  1660. * deflate switches away from Z_RLE.)
  1661. */
local block_state deflate_rle(s, flush)
    deflate_state *s;
    int flush;
{
    int bflush;             /* set if current block must be flushed */
    uInt prev;              /* byte at distance one to match */
    Bytef *scan, *strend;   /* scan goes up to strend for length of run */

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the longest run, plus one for the unrolled loop.
         */
        if (s->lookahead <= MAX_MATCH) {
            fill_window(s);
            if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* See how many times the previous byte repeats */
        s->match_length = 0;
        if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
            scan = s->window + s->strstart - 1;
            prev = *scan;
            /* Only bother when at least a minimal (3-byte) run exists. */
            if (prev == *++scan && prev == *++scan && prev == *++scan) {
                strend = s->window + s->strstart + MAX_MATCH;
                /* Empty-body loop: the run scan is unrolled 8 comparisons
                 * per iteration inside the while condition itself.
                 */
                do {
                } while (prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         scan < strend);
                s->match_length = MAX_MATCH - (int)(strend - scan);
                /* Never claim more than the bytes actually available. */
                if (s->match_length > s->lookahead)
                    s->match_length = s->lookahead;
            }
            Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
        }

        /* Emit match if have run of MIN_MATCH or longer, else emit literal */
        if (s->match_length >= MIN_MATCH) {
            check_match(s, s->strstart, s->strstart - 1, s->match_length);

            /* RLE emits every run as a match at distance one. */
            _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);

            s->lookahead -= s->match_length;
            s->strstart += s->match_length;
            s->match_length = 0;
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit (s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        /* FLUSH_BLOCK can return from this function when the
         * output buffer fills (see the macro definition).
         */
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    /* No hash table is maintained in RLE mode, so nothing to re-insert. */
    s->insert = 0;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->last_lit)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
  1725. /* ===========================================================================
  1726. * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
  1727. * (It will be regenerated if this run of deflate switches away from Huffman.)
  1728. */
  1729. local block_state deflate_huff(s, flush)
  1730. deflate_state *s;
  1731. int flush;
  1732. {
  1733. int bflush; /* set if current block must be flushed */
  1734. for (;;) {
  1735. /* Make sure that we have a literal to write. */
  1736. if (s->lookahead == 0) {
  1737. fill_window(s);
  1738. if (s->lookahead == 0) {
  1739. if (flush == Z_NO_FLUSH)
  1740. return need_more;
  1741. break; /* flush the current block */
  1742. }
  1743. }
  1744. /* Output a literal byte */
  1745. s->match_length = 0;
  1746. Tracevv((stderr,"%c", s->window[s->strstart]));
  1747. _tr_tally_lit (s, s->window[s->strstart], bflush);
  1748. s->lookahead--;
  1749. s->strstart++;
  1750. if (bflush) FLUSH_BLOCK(s, 0);
  1751. }
  1752. s->insert = 0;
  1753. if (flush == Z_FINISH) {
  1754. FLUSH_BLOCK(s, 1);
  1755. return finish_done;
  1756. }
  1757. if (s->last_lit)
  1758. FLUSH_BLOCK(s, 0);
  1759. return block_done;
  1760. }