xref: /haiku/src/bin/unzip/inflate.c (revision 02354704729d38c3b078c696adc1bbbd33cbcf72)
1 /*
2   Copyright (c) 1990-2002 Info-ZIP.  All rights reserved.
3 
4   See the accompanying file LICENSE, version 2000-Apr-09 or later
5   (the contents of which are also included in unzip.h) for terms of use.
6   If, for some reason, all these files are missing, the Info-ZIP license
7   also may be found at:  ftp://ftp.info-zip.org/pub/infozip/license.html
8 */
9 /* inflate.c -- by Mark Adler
10    version c17a, 04 Feb 2001 */
11 
12 
13 /* Copyright history:
14    - Starting with UnZip 5.41 of 16-April-2000, this source file
15      is covered by the Info-Zip LICENSE cited above.
16    - Prior versions of this source file, found in UnZip source packages
17      up to UnZip 5.40, were put in the public domain.
18      The original copyright note by Mark Adler was:
19          "You can do whatever you like with this source file,
20          though I would prefer that if you modify it and
21          redistribute it that you include comments to that effect
22          with your name and the date.  Thank you."
23 
24    History:
25    vers    date          who           what
26    ----  ---------  --------------  ------------------------------------
27     a    ~~ Feb 92  M. Adler        used full (large, one-step) lookup table
28     b1   21 Mar 92  M. Adler        first version with partial lookup tables
29     b2   21 Mar 92  M. Adler        fixed bug in fixed-code blocks
30     b3   22 Mar 92  M. Adler        sped up match copies, cleaned up some
31     b4   25 Mar 92  M. Adler        added prototypes; removed window[] (now
32                                     is the responsibility of unzip.h--also
33                                     changed name to slide[]), so needs diffs
34                                     for unzip.c and unzip.h (this allows
35                                     compiling in the small model on MSDOS);
36                                     fixed cast of q in huft_build();
37     b5   26 Mar 92  M. Adler        got rid of unintended macro recursion.
38     b6   27 Mar 92  M. Adler        got rid of nextbyte() routine.  fixed
39                                     bug in inflate_fixed().
40     c1   30 Mar 92  M. Adler        removed lbits, dbits environment variables.
41                                     changed BMAX to 16 for explode.  Removed
42                                     OUTB usage, and replaced it with flush()--
43                                     this was a 20% speed improvement!  Added
44                                     an explode.c (to replace unimplod.c) that
45                                     uses the huft routines here.  Removed
46                                     register union.
47     c2    4 Apr 92  M. Adler        fixed bug for file sizes a multiple of 32k.
48     c3   10 Apr 92  M. Adler        reduced memory of code tables made by
49                                     huft_build significantly (factor of two to
50                                     three).
51     c4   15 Apr 92  M. Adler        added NOMEMCPY to kill use of memcpy().
52                                     worked around a Turbo C optimization bug.
53     c5   21 Apr 92  M. Adler        added the WSIZE #define to allow reducing
54                                     the 32K window size for specialized
55                                     applications.
56     c6   31 May 92  M. Adler        added some typecasts to eliminate warnings
57     c7   27 Jun 92  G. Roelofs      added some more typecasts (444:  MSC bug).
58     c8    5 Oct 92  J-l. Gailly     added ifdef'd code to deal with PKZIP bug.
59     c9    9 Oct 92  M. Adler        removed a memory error message (~line 416).
60     c10  17 Oct 92  G. Roelofs      changed ULONG/UWORD/byte to ulg/ush/uch,
61                                     removed old inflate, renamed inflate_entry
62                                     to inflate, added Mark's fix to a comment.
63    c10.5 14 Dec 92  M. Adler        fix up error messages for incomplete trees.
64     c11   2 Jan 93  M. Adler        fixed bug in detection of incomplete
65                                     tables, and removed assumption that EOB is
66                                     the longest code (bad assumption).
67     c12   3 Jan 93  M. Adler        make tables for fixed blocks only once.
68     c13   5 Jan 93  M. Adler        allow all zero length codes (pkzip 2.04c
69                                     outputs one zero length code for an empty
70                                     distance tree).
71     c14  12 Mar 93  M. Adler        made inflate.c standalone with the
72                                     introduction of inflate.h.
73    c14b  16 Jul 93  G. Roelofs      added (unsigned) typecast to w at 470.
74    c14c  19 Jul 93  J. Bush         changed v[N_MAX], l[288], ll[28x+3x] arrays
75                                     to static for Amiga.
76    c14d  13 Aug 93  J-l. Gailly     de-complicatified Mark's c[*p++]++ thing.
77    c14e   8 Oct 93  G. Roelofs      changed memset() to memzero().
78    c14f  22 Oct 93  G. Roelofs      renamed quietflg to qflag; made Trace()
79                                     conditional; added inflate_free().
80    c14g  28 Oct 93  G. Roelofs      changed l/(lx+1) macro to pointer (Cray bug)
81    c14h   7 Dec 93  C. Ghisler      huft_build() optimizations.
82    c14i   9 Jan 94  A. Verheijen    set fixed_t{d,l} to NULL after freeing;
83                     G. Roelofs      check NEXTBYTE macro for EOF.
84    c14j  23 Jan 94  G. Roelofs      removed Ghisler "optimizations"; ifdef'd
85                                     EOF check.
86    c14k  27 Feb 94  G. Roelofs      added some typecasts to avoid warnings.
87    c14l   9 Apr 94  G. Roelofs      fixed split comments on preprocessor lines
88                                     to avoid bug in Encore compiler.
89    c14m   7 Jul 94  P. Kienitz      modified to allow assembler version of
90                                     inflate_codes() (define ASM_INFLATECODES)
91    c14n  22 Jul 94  G. Roelofs      changed fprintf to macro for DLL versions
92    c14o  23 Aug 94  C. Spieler      added a newline to a debug statement;
93                     G. Roelofs      added another typecast to avoid MSC warning
94    c14p   4 Oct 94  G. Roelofs      added (voidp *) cast to free() argument
95    c14q  30 Oct 94  G. Roelofs      changed fprintf macro to MESSAGE()
96    c14r   1 Nov 94  G. Roelofs      fixed possible redefinition of CHECK_EOF
97    c14s   7 May 95  S. Maxwell      OS/2 DLL globals stuff incorporated;
98                     P. Kienitz      "fixed" ASM_INFLATECODES macro/prototype
99    c14t  18 Aug 95  G. Roelofs      added UZinflate() to use zlib functions;
100                                     changed voidp to zvoid; moved huft_build()
101                                     and huft_free() to end of file
102    c14u   1 Oct 95  G. Roelofs      moved G into definition of MESSAGE macro
103    c14v   8 Nov 95  P. Kienitz      changed ASM_INFLATECODES to use a regular
104                                     call with __G__ instead of a macro
105     c15   3 Aug 96  M. Adler        fixed bomb-bug on random input data (Adobe)
106    c15b  24 Aug 96  M. Adler        more fixes for random input data
107    c15c  28 Mar 97  G. Roelofs      changed USE_ZLIB fatal exit code from
108                                     PK_MEM2 to PK_MEM3
109     c16  20 Apr 97  J. Altman       added memzero(v[]) in huft_build()
110    c16b  29 Mar 98  C. Spieler      modified DLL code for slide redirection
111    c16c  04 Apr 99  C. Spieler      fixed memory leaks when processing gets
112                                     stopped because of input data errors
113    c16d  05 Jul 99  C. Spieler      take care of FLUSH() return values and
114                                     stop processing in case of errors
115     c17  31 Dec 00  C. Spieler      added preliminary support for Deflate64
116    c17a  04 Feb 01  C. Spieler      complete integration of Deflate64 support
117    c17b  16 Feb 02  C. Spieler      changed type of "extra bits" arrays and
118                                     corresponding huft_build() parameter e from
119                                     ush into uch, to save space
120  */
121 
122 
123 /*
124    Inflate deflated (PKZIP's method 8 compressed) data.  The compression
125    method searches for as much of the current string of bytes (up to a
126    length of 258) in the previous 32K bytes.  If it doesn't find any
127    matches (of at least length 3), it codes the next byte.  Otherwise, it
128    codes the length of the matched string and its distance backwards from
129    the current position.  There is a single Huffman code that codes both
130    single bytes (called "literals") and match lengths.  A second Huffman
131    code codes the distance information, which follows a length code.  Each
132    length or distance code actually represents a base value and a number
133    of "extra" (sometimes zero) bits to read and add to the base value.  At
134    the end of each deflated block is a special end-of-block (EOB) literal/
135    length code.  The decoding process is basically: get a literal/length
136    code; if EOB then done; if a literal, emit the decoded byte; if a
137    length then get the distance and emit the referred-to bytes from the
138    sliding window of previously emitted data.
139 
140    There are (currently) three kinds of inflate blocks: stored, fixed, and
141    dynamic.  The compressor outputs a chunk of data at a time and decides
142    which method to use on a chunk-by-chunk basis.  A chunk might typically
143    be 32K to 64K, uncompressed.  If the chunk is uncompressible, then the
144    "stored" method is used.  In this case, the bytes are simply stored as
145    is, eight bits per byte, with none of the above coding.  The bytes are
146    preceded by a count, since there is no longer an EOB code.
147 
148    If the data are compressible, then either the fixed or dynamic methods
149    are used.  In the dynamic method, the compressed data are preceded by
150    an encoding of the literal/length and distance Huffman codes that are
151    to be used to decode this block.  The representation is itself Huffman
152    coded, and so is preceded by a description of that code.  These code
153    descriptions take up a little space, and so for small blocks, there is
154    a predefined set of codes, called the fixed codes.  The fixed method is
155    used if the block ends up smaller that way (usually for quite small
156    chunks); otherwise the dynamic method is used.  In the latter case, the
157    codes are customized to the probabilities in the current block and so
158    can code it much better than the pre-determined fixed codes can.
159 
160    The Huffman codes themselves are decoded using a multi-level table
161    lookup, in order to maximize the speed of decoding plus the speed of
162    building the decoding tables.  See the comments below that precede the
163    lbits and dbits tuning parameters.
164 
165    GRR:  return values(?)
166            0  OK
167            1  incomplete table
168            2  bad input
169            3  not enough memory
170          the following return codes are passed through from FLUSH() errors
171            50 (PK_DISK)   "overflow of output space"
172            80 (IZ_CTRLC)  "canceled by user's request"
173  */
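/* A rough, non-compiled sketch of that decoding loop follows; every
   identifier in it is a hypothetical placeholder used only for illustration,
   and inflate_codes() below is the real implementation.                      */
#if 0
  for (;;) {
    unsigned sym = decode_litlen();       /* one literal/length code           */
    if (sym == 256)                       /* EOB: this block is done           */
      break;
    if (sym < 256)                        /* literal: emit the decoded byte    */
      emit_byte((uch)sym);
    else {                                /* length: base value + extra bits   */
      unsigned len  = length_base[sym - 257] + read_extra(length_extra[sym - 257]);
      unsigned dsym = decode_dist();      /* a distance code follows a length  */
      unsigned dist = dist_base[dsym] + read_extra(dist_extra[dsym]);
      copy_from_window(dist, len);        /* emit bytes from the sliding window */
    }
  }
#endif /* 0 */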
174 
175 
176 /*
177    Notes beyond the 1.93a appnote.txt:
178 
179    1. Distance pointers never point before the beginning of the output
180       stream.
181    2. Distance pointers can point back across blocks, up to 32k away.
182    3. There is an implied maximum of 7 bits for the bit length table and
183       15 bits for the actual data.
184    4. If only one code exists, then it is encoded using one bit.  (Zero
185       would be more efficient, but perhaps a little confusing.)  If two
186       codes exist, they are coded using one bit each (0 and 1).
187    5. There is no way of sending zero distance codes--a dummy must be
188       sent if there are none.  (History: a pre 2.0 version of PKZIP would
189       store blocks with no distance codes, but this was discovered to be
190       too harsh a criterion.)  Valid only for 1.93a.  2.04c does allow
191       zero distance codes; an empty distance tree is sent as one code
192       of zero bit length.
193    6. There are up to 286 literal/length codes.  Code 256 represents the
194       end-of-block.  Note however that the static length tree defines
195       288 codes just to fill out the Huffman codes.  Codes 286 and 287
196       cannot be used though, since there is no length base or extra bits
197       defined for them.  Similarly, there are up to 30 distance codes.
198       However, static trees define 32 codes (all 5 bits) to fill out the
199       Huffman codes, but the last two had better not show up in the data.
200    7. Unzip can check dynamic Huffman blocks for complete code sets.
201       The exception is that a single code would not be complete (see #4).
202    8. The five bits following the block type are really the number of
203       literal codes sent minus 257.
204    9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
205       (1+6+6).  Therefore, to output three times the length, you output
206       three codes (1+1+1), whereas to output four times the same length,
207       you only need two codes (1+3).  Hmm.
208   10. In the tree reconstruction algorithm, Code = Code + Increment
209       only if BitLength(i) is not zero.  (Pretty obvious.)
210   11. Correction: 4 Bits: # of Bit Length codes - 4     (4 - 19)
211   12. Note: length code 284 can represent 227-258, but length code 285
212       really is 258.  The last length deserves its own, short code
213       since it gets used a lot in very redundant files.  The length
214       258 is special since 258 - 3 (the min match length) is 255.
215   13. The literal/length and distance code bit lengths are read as a
216       single stream of lengths.  It is possible (and advantageous) for
217       a repeat code (16, 17, or 18) to go across the boundary between
218       the two sets of lengths.
219   14. The Deflate64 (PKZIP method 9) variant of the compression algorithm
220       differs from "classic" deflate in the following 3 aspects:
221       a) The size of the sliding history window is expanded to 64 kByte.
222       b) The previously unused distance codes #30 and #31 code distances
223          from 32769 to 49152 and 49153 to 65536.  Both codes take 14 bits
224          of extra data to determine the exact position in their 16 kByte
225          range.
226       c) The last lit/length code #285 gets a different meaning. Instead
227          of coding a fixed maximum match length of 258, it is used as a
228          "generic" match length code, capable of coding any length from
229          3 (min match length + 0) to 65538 (min match length + 65535).
230          This means that the length code #285 takes 16 bits (!) of uncoded
231          extra data, added to a fixed min length of 3.
232       Changes a) and b) would have been transparent for valid deflated
233       data, but change c) requires switching decoder configurations between
234       Deflate and Deflate64 modes.
235  */
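/* Worked example for note 14c: under Deflate64, lit/length code 285 selects
   entry 28 of cplens64[]/cplext64[] below (base length 3, 16 extra bits), so
   inflate_codes() effectively does the following (sketch of its locals):     */
#if 0
  NEEDBITS(16)                                /* 16 bits of uncoded extra data */
  n = 3 + ((unsigned)b & mask_bits[16]);      /* length 3 .. 65538             */
  DUMPBITS(16)
#endif /* 0 */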
236 
237 
238 #define PKZIP_BUG_WORKAROUND    /* PKZIP 1.93a problem--live with it */
239 
240 /*
241     inflate.h must supply the uch slide[WSIZE] array, the zvoid typedef
242     (void if (void *) is accepted, else char) and the NEXTBYTE,
243     FLUSH() and memzero macros.  If the window size is not 32K, it
244     should also define WSIZE.  If INFMOD is defined, it can include
245     compiled functions to support the NEXTBYTE and/or FLUSH() macros.
246     There are defaults for NEXTBYTE and FLUSH() below for use as
247     examples of what those functions need to do.  Normally, you would
248     also want FLUSH() to compute a crc on the data.  inflate.h also
249     needs to provide these typedefs:
250 
251         typedef unsigned char uch;
252         typedef unsigned short ush;
253         typedef unsigned long ulg;
254 
255     This module uses the external functions malloc() and free() (and
256     probably memset() or bzero() in the memzero() macro).  Their
257     prototypes are normally found in <string.h> and <stdlib.h>.
258  */
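/* Purely as a hypothetical illustration (this is NOT the real inflate.h or
   unzip.h), a minimal host environment along the lines described above
   could look roughly like this; the NEXTBYTE/FLUSH defaults further below
   follow the same pattern, and a real FLUSH() would also update the CRC:    */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned char  uch;
typedef unsigned short ush;
typedef unsigned long  ulg;
typedef void zvoid;                     /* (void *) accepted by the compiler  */

#define WSIZE 0x8000                    /* 32K sliding window                 */
static uch slide[WSIZE];

#define NEXTBYTE     getchar()          /* fetch one compressed byte          */
#define memzero(p,n) memset((p), 0, (n))
#define FLUSH(n)     (fwrite(slide, 1, (size_t)(n), stdout) == (size_t)(n) \
                      ? 0 : 50 /* PK_DISK */)
#endif /* 0 */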
259 
260 #define __INFLATE_C     /* identifies this source module */
261 
262 /* #define DEBUG */
263 #define INFMOD          /* tell inflate.h to include code to be compiled */
264 #include "inflate.h"
265 
266 
267 /* marker for "unused" huft code, and corresponding check macro */
268 #define INVALID_CODE 99
269 #define IS_INVALID_CODE(c)  ((c) == INVALID_CODE)
270 
271 #ifndef WSIZE               /* default is 32K (64K for Deflate64) */
272 #  ifdef USE_DEFLATE64
273 #    define WSIZE   65536L  /* window size--must be a power of two, and */
274 #  else                     /*  at least 64K for PKZip's deflate64 method */
275 #    define WSIZE   0x8000  /* window size--must be a power of two, and */
276 #  endif                    /*  at least 32K for zip's deflate method */
277 #endif
278 
279 /* some buffer counters must be capable of holding 64k for Deflate64 */
280 #if (defined(USE_DEFLATE64) && defined(INT_16BIT))
281 #  define UINT_D64 ulg
282 #else
283 #  define UINT_D64 unsigned
284 #endif
285 
286 #if (defined(DLL) && !defined(NO_SLIDE_REDIR))
287 #  define wsize G._wsize    /* wsize is a variable */
288 #else
289 #  define wsize WSIZE       /* wsize is a constant */
290 #endif
291 
292 
293 #ifndef NEXTBYTE        /* default is to simply get a byte from stdin */
294 #  define NEXTBYTE getchar()
295 #endif
296 
297 #ifndef MESSAGE   /* only used twice, for fixed strings--NOT general-purpose */
298 #  define MESSAGE(str,len,flag)  fprintf(stderr,(char *)(str))
299 #endif
300 
301 #ifndef FLUSH           /* default is to simply write the buffer to stdout */
302 #  define FLUSH(n) \
303     (((extent)fwrite(redirSlide, 1, (extent)(n), stdout) == (extent)(n)) ? \
304      0 : PKDISK)
305 #endif
306 /* Warning: the fwrite above might not work on 16-bit compilers, since
307    0x8000 might be interpreted as -32,768 by the library function.  When
308    support for Deflate64 is enabled, the window size is 64K and the
309    simple fwrite statement is definitely broken for 16-bit compilers. */
310 
311 #ifndef Trace
312 #  ifdef DEBUG
313 #    define Trace(x) fprintf x
314 #  else
315 #    define Trace(x)
316 #  endif
317 #endif
318 
319 
320 /*---------------------------------------------------------------------------*/
321 #ifdef USE_ZLIB
322 
323 
324 /*
325    GRR:  return values for both original inflate() and UZinflate()
326            0  OK
327            1  incomplete table(?)
328            2  bad input
329            3  not enough memory
330  */
331 
332 /**************************/
333 /*  Function UZinflate()  */
334 /**************************/
335 
336 int UZinflate(__G__ is_defl64)
337     __GDEF
338     int is_defl64;
339 /* decompress an inflated entry using the zlib routines */
340 {
341     int retval = 0;     /* return code: 0 = "no error" */
342     int err=Z_OK;
343 
344 #if (defined(DLL) && !defined(NO_SLIDE_REDIR))
345     if (G.redirect_slide)
346         wsize = G.redirect_size, redirSlide = G.redirect_buffer;
347     else
348         wsize = WSIZE, redirSlide = slide;
349 #endif
350 
351     G.dstrm.next_out = redirSlide;
352     G.dstrm.avail_out = wsize;
353 
354     G.dstrm.next_in = G.inptr;
355     G.dstrm.avail_in = G.incnt;
356 
357     if (!G.inflInit) {
358         unsigned i;
359         int windowBits;
360 
361         /* only need to test this stuff once */
362         if (zlib_version[0] != ZLIB_VERSION[0]) {
363             Info(slide, 0x21, ((char *)slide,
364               "error:  incompatible zlib version (expected %s, found %s)\n",
365               ZLIB_VERSION, zlib_version));
366             return 3;
367         } else if (strcmp(zlib_version, ZLIB_VERSION) != 0)
368             Info(slide, 0x21, ((char *)slide,
369               "warning:  different zlib version (expected %s, using %s)\n",
370               ZLIB_VERSION, zlib_version));
371 
372         /* windowBits = log2(wsize) */
373         for (i = (unsigned)wsize, windowBits = 0;
374              !(i & 1);  i >>= 1, ++windowBits);
375         if ((unsigned)windowBits > (unsigned)15)
376             windowBits = 15;
377         else if (windowBits < 8)
378             windowBits = 8;
379 
380         G.dstrm.zalloc = (alloc_func)Z_NULL;
381         G.dstrm.zfree = (free_func)Z_NULL;
382 
383         Trace((stderr, "initializing inflate()\n"));
384         err = inflateInit2(&G.dstrm, -windowBits);
385 
386         if (err == Z_MEM_ERROR)
387             return 3;
388         else if (err != Z_OK)
389             Trace((stderr, "oops!  (inflateInit2() err = %d)\n", err));
390         G.inflInit = 1;
391     }
392 
393 #ifdef FUNZIP
394     while (err != Z_STREAM_END) {
395 #else /* !FUNZIP */
396     while (G.csize > 0) {
397         Trace((stderr, "first loop:  G.csize = %ld\n", G.csize));
398 #endif /* ?FUNZIP */
399         while (G.dstrm.avail_out > 0) {
400             err = inflate(&G.dstrm, Z_PARTIAL_FLUSH);
401 
402             if (err == Z_DATA_ERROR) {
403                 retval = 2; goto uzinflate_cleanup_exit;
404             } else if (err == Z_MEM_ERROR) {
405                 retval = 3; goto uzinflate_cleanup_exit;
406             } else if (err != Z_OK && err != Z_STREAM_END)
407                 Trace((stderr, "oops!  (inflate(first loop) err = %d)\n", err));
408 
409 #ifdef FUNZIP
410             if (err == Z_STREAM_END)    /* "END-of-entry-condition" ? */
411 #else /* !FUNZIP */
412             if (G.csize <= 0L)          /* "END-of-entry-condition" ? */
413 #endif /* ?FUNZIP */
414                 break;
415 
416             if (G.dstrm.avail_in <= 0) {
417                 if (fillinbuf(__G) == 0) {
418                     /* no "END-condition" yet, but no more data */
419                     retval = 2; goto uzinflate_cleanup_exit;
420                 }
421 
422                 G.dstrm.next_in = G.inptr;
423                 G.dstrm.avail_in = G.incnt;
424             }
425             Trace((stderr, "     avail_in = %d\n", G.dstrm.avail_in));
426         }
427         /* flush slide[] */
428         if ((retval = FLUSH(wsize - G.dstrm.avail_out)) != 0)
429             goto uzinflate_cleanup_exit;
430         Trace((stderr, "inside loop:  flushing %ld bytes (ptr diff = %ld)\n",
431           (long)(wsize - G.dstrm.avail_out),
432           (long)(G.dstrm.next_out-(Bytef *)redirSlide)));
433         G.dstrm.next_out = redirSlide;
434         G.dstrm.avail_out = wsize;
435     }
436 
437     /* no more input, so loop until we have all output */
438     Trace((stderr, "beginning final loop:  err = %d\n", err));
439     while (err != Z_STREAM_END) {
440         err = inflate(&G.dstrm, Z_PARTIAL_FLUSH);
441         if (err == Z_DATA_ERROR) {
442             retval = 2; goto uzinflate_cleanup_exit;
443         } else if (err == Z_MEM_ERROR) {
444             retval = 3; goto uzinflate_cleanup_exit;
445         } else if (err == Z_BUF_ERROR) {                /* DEBUG */
446             Trace((stderr,
447                    "zlib inflate() did not detect stream end (%s, %s)\n",
448                    G.zipfn, G.filename));
449             break;
450         } else if (err != Z_OK && err != Z_STREAM_END) {
451             Trace((stderr, "oops!  (inflate(final loop) err = %d)\n", err));
452             DESTROYGLOBALS();
453             EXIT(PK_MEM3);
454         }
455         /* final flush of slide[] */
456         if ((retval = FLUSH(wsize - G.dstrm.avail_out)) != 0)
457             goto uzinflate_cleanup_exit;
458         Trace((stderr, "final loop:  flushing %ld bytes (ptr diff = %ld)\n",
459           (long)(wsize - G.dstrm.avail_out),
460           (long)(G.dstrm.next_out-(Bytef *)redirSlide)));
461         G.dstrm.next_out = redirSlide;
462         G.dstrm.avail_out = wsize;
463     }
464     Trace((stderr, "total in = %ld, total out = %ld\n", G.dstrm.total_in,
465       G.dstrm.total_out));
466 
467     G.inptr = (uch *)G.dstrm.next_in;
468     G.incnt = (G.inbuf + INBUFSIZ) - G.inptr;  /* reset for other routines */
469 
470 uzinflate_cleanup_exit:
471     err = inflateReset(&G.dstrm);
472     if (err != Z_OK)
473         Trace((stderr, "oops!  (inflateReset() err = %d)\n", err));
474 
475     return retval;
476 }
477 
478 
479 /*---------------------------------------------------------------------------*/
480 #else /* !USE_ZLIB */
481 
482 
483 /* Function prototypes */
484 #ifndef OF
485 #  ifdef __STDC__
486 #    define OF(a) a
487 #  else
488 #    define OF(a) ()
489 #  endif
490 #endif /* !OF */
491 int inflate_codes OF((__GPRO__ struct huft *tl, struct huft *td,
492                       int bl, int bd));
493 static int inflate_stored OF((__GPRO));
494 static int inflate_fixed OF((__GPRO));
495 static int inflate_dynamic OF((__GPRO));
496 static int inflate_block OF((__GPRO__ int *e));
497 
498 
499 /* The inflate algorithm uses a sliding 32K byte window on the uncompressed
500    stream to find repeated byte strings.  This is implemented here as a
501    circular buffer.  The index is updated simply by incrementing and then
502    and'ing with 0x7fff (32K-1). */
503 /* It is left to other modules to supply the 32K area.  It is assumed
504    to be usable as if it were declared "uch slide[32768];" or as just
505    "uch *slide;" and then malloc'ed in the latter case.  The definition
506    must be in unzip.h, included above. */
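/* In inflate_codes() and inflate_stored() below, that bookkeeping looks
   like this (sketch copied from the literal case of inflate_codes()):       */
#if 0
        redirSlide[w++] = (uch)t->v.n;  /* emit one decoded byte              */
        if (w == wsize)                 /* window full: flush it, then wrap   */
        {
          if ((retval = FLUSH(w)) != 0) goto cleanup_and_exit;
          w = 0;
        }
#endif /* 0 */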
507 
508 
509 /* unsigned wp;  moved to globals.h */     /* current position in slide */
510 
511 /* Tables for deflate from PKZIP's appnote.txt. */
512 /* - Order of the bit length code lengths */
513 static ZCONST unsigned border[] = {
514         16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
515 
516 /* - Copy lengths for literal codes 257..285 */
517 #ifdef USE_DEFLATE64
518 static ZCONST ush cplens64[] = {
519         3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
520         35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 3, 0, 0};
521         /* For Deflate64, the code 285 is defined differently. */
522 #else
523 #  define cplens32 cplens
524 #endif
525 static ZCONST ush cplens32[] = {
526         3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
527         35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
528         /* note: see note #12 above about the 258 in this list. */
529 /* - Extra bits for literal codes 257..285 */
530 #ifdef USE_DEFLATE64
531 static ZCONST uch cplext64[] = {
532         0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
533         3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 16, INVALID_CODE, INVALID_CODE};
534 #else
535 #  define cplext32 cplext
536 #endif
537 static ZCONST uch cplext32[] = {
538         0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
539         3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, INVALID_CODE, INVALID_CODE};
540 
541 /* - Copy offsets for distance codes 0..29 (0..31 for Deflate64) */
542 static ZCONST ush cpdist[] = {
543         1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
544         257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
545 #if (defined(USE_DEFLATE64) || defined(PKZIP_BUG_WORKAROUND))
546         8193, 12289, 16385, 24577, 32769, 49153};
547 #else
548         8193, 12289, 16385, 24577};
549 #endif
550 
551 /* - Extra bits for distance codes 0..29 (0..31 for Deflate64) */
552 #ifdef USE_DEFLATE64
553 static ZCONST uch cpdext64[] = {
554         0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
555         7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
556         12, 12, 13, 13, 14, 14};
557 #else
558 #  define cpdext32 cpdext
559 #endif
560 static ZCONST uch cpdext32[] = {
561         0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
562         7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
563 #ifdef PKZIP_BUG_WORKAROUND
564         12, 12, 13, 13, INVALID_CODE, INVALID_CODE};
565 #else
566         12, 12, 13, 13};
567 #endif
568 
569 #ifdef PKZIP_BUG_WORKAROUND
570 #  define MAXLITLENS 288
571 #else
572 #  define MAXLITLENS 286
573 #endif
574 #if (defined(USE_DEFLATE64) || defined(PKZIP_BUG_WORKAROUND))
575 #  define MAXDISTS 32
576 #else
577 #  define MAXDISTS 30
578 #endif
579 
580 
581 /* moved to consts.h (included in unzip.c) or funzip.c, respectively */
582 #if 0
583 /* And'ing with mask_bits[n] masks the lower n bits */
584 ZCONST ush near mask_bits[] = {
585     0x0000,
586     0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
587     0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
588 };
589 #endif /* 0 */
590 
591 
592 /* Macros for inflate() bit peeking and grabbing.
593    The usage is:
594 
595         NEEDBITS(j)
596         x = b & mask_bits[j];
597         DUMPBITS(j)
598 
599    where NEEDBITS makes sure that b has at least j bits in it, and
600    DUMPBITS removes the bits from b.  The macros use the variable k
601    for the number of bits in b.  Normally, b and k are register
602    variables for speed and are initialized at the beginning of a
603    routine that uses these macros from a global bit buffer and count.
604 
605    In order to not ask for more bits than there are in the compressed
606    stream, the Huffman tables are constructed to only ask for just
607    enough bits to make up the end-of-block code (value 256).  Then no
608    bytes need to be "returned" to the buffer at the end of the last
609    block.  See the huft_build() routine.
610  */
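/* Concrete instance: inflate_dynamic() below reads the 5-bit count of
   literal/length codes with exactly this pattern (sketch of its locals):    */
#if 0
  NEEDBITS(5)
  nl = 257 + ((unsigned)b & 0x1f);      /* 0x1f == mask_bits[5]               */
  DUMPBITS(5)
#endif /* 0 */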
611 
612 /* These have been moved to globals.h */
613 #if 0
614 ulg bb;                         /* bit buffer */
615 unsigned bk;                    /* bits in bit buffer */
616 #endif
617 
618 #ifndef CHECK_EOF
619 #  define CHECK_EOF   /* default as of 5.13/5.2 */
620 #endif
621 
622 #ifndef CHECK_EOF
623 #  define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE)<<k;k+=8;}}
624 #else
625 #  define NEEDBITS(n) {while(k<(n)){int c=NEXTBYTE;\
626     if(c==EOF){retval=1;goto cleanup_and_exit;}\
627     b|=((ulg)c)<<k;k+=8;}}
628 #endif
629 
630 #define DUMPBITS(n) {b>>=(n);k-=(n);}
631 
632 
633 /*
634    Huffman code decoding is performed using a multi-level table lookup.
635    The fastest way to decode is to simply build a lookup table whose
636    size is determined by the longest code.  However, the time it takes
637    to build this table can also be a factor if the data being decoded
638    are not very long.  The most common codes are necessarily the
639    shortest codes, so those codes dominate the decoding time, and hence
640    the speed.  The idea is you can have a shorter table that decodes the
641    shorter, more probable codes, and then point to subsidiary tables for
642    the longer codes.  The time it costs to decode the longer codes is
643    then traded against the time it takes to make longer tables.
644 
645    The results of this trade-off are in the variables lbits and dbits
646    below.  lbits is the number of bits the first level table for literal/
647    length codes can decode in one step, and dbits is the same thing for
648    the distance codes.  Subsequent tables are also less than or equal to
649    those sizes.  These values may be adjusted either when all of the
650    codes are shorter than that, in which case the longest code length in
651    bits is used, or when the shortest code is *longer* than the requested
652    table size, in which case the length of the shortest code in bits is
653    used.
654 
655    There are two different values for the two tables, since they code a
656    different number of possibilities each.  The literal/length table
657    codes 286 possible values, or in a flat code, a little over eight
658    bits.  The distance table codes 30 possible values, or a little less
659    than five bits, flat.  The optimum values for speed end up being
660    about one bit more than those, so lbits is 8+1 and dbits is 5+1.
661    The optimum values may differ though from machine to machine, and
662    possibly even between compilers.  Your mileage may vary.
663  */
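/* The multi-level walk itself, as coded in inflate_codes() below for the
   distance table (sketch; b and k are the bit-buffer locals and
   md == mask_bits[bd]):                                                      */
#if 0
  NEEDBITS((unsigned)bd)
  t = td + ((unsigned)b & md);          /* first level: bd-bit lookup         */
  while (1) {
    DUMPBITS(t->b)
    if ((e = t->e) < 32)                /* a decoded symbol entry             */
      break;
    if (IS_INVALID_CODE(e))
      return 1;
    e &= 31;                            /* low 5 bits give sub-table size     */
    NEEDBITS(e)
    t = t->v.t + ((unsigned)b & mask_bits[e]);   /* descend one level         */
  }
#endif /* 0 */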
664 
665 
666 static ZCONST int lbits = 9;    /* bits in base literal/length lookup table */
667 static ZCONST int dbits = 6;    /* bits in base distance lookup table */
668 
669 
670 #ifndef ASM_INFLATECODES
671 
672 int inflate_codes(__G__ tl, td, bl, bd)
673      __GDEF
674 struct huft *tl, *td;   /* literal/length and distance decoder tables */
675 int bl, bd;             /* number of bits decoded by tl[] and td[] */
676 /* inflate (decompress) the codes in a deflated (compressed) block.
677    Return an error code or zero if it all goes ok. */
678 {
679   register unsigned e;  /* table entry flag/number of extra bits */
680   unsigned d;           /* index for copy */
681   UINT_D64 n;           /* length for copy (deflate64: might be 64k+2) */
682   UINT_D64 w;           /* current window position (deflate64: up to 64k) */
683   struct huft *t;       /* pointer to table entry */
684   unsigned ml, md;      /* masks for bl and bd bits */
685   register ulg b;       /* bit buffer */
686   register unsigned k;  /* number of bits in bit buffer */
687   int retval = 0;       /* error code returned: initialized to "no error" */
688 
689 
690   /* make local copies of globals */
691   b = G.bb;                       /* initialize bit buffer */
692   k = G.bk;
693   w = G.wp;                       /* initialize window position */
694 
695 
696   /* inflate the coded data */
697   ml = mask_bits[bl];           /* precompute masks for speed */
698   md = mask_bits[bd];
699   while (1)                     /* do until end of block */
700   {
701     NEEDBITS((unsigned)bl)
702     t = tl + ((unsigned)b & ml);
703     while (1) {
704       DUMPBITS(t->b)
705 
706       if ((e = t->e) == 32)     /* then it's a literal */
707       {
708         redirSlide[w++] = (uch)t->v.n;
709         if (w == wsize)
710         {
711           if ((retval = FLUSH(w)) != 0) goto cleanup_and_exit;
712           w = 0;
713         }
714         break;
715       }
716 
717       if (e < 31)               /* then it's a length */
718       {
719         /* get length of block to copy */
720         NEEDBITS(e)
721         n = t->v.n + ((unsigned)b & mask_bits[e]);
722         DUMPBITS(e)
723 
724         /* decode distance of block to copy */
725         NEEDBITS((unsigned)bd)
726         t = td + ((unsigned)b & md);
727         while (1) {
728           DUMPBITS(t->b)
729           if ((e = t->e) < 32)
730             break;
731           if (IS_INVALID_CODE(e))
732             return 1;
733           e &= 31;
734           NEEDBITS(e)
735           t = t->v.t + ((unsigned)b & mask_bits[e]);
736         }
737         NEEDBITS(e)
738         d = (unsigned)w - t->v.n - ((unsigned)b & mask_bits[e]);
739         DUMPBITS(e)
740 
741         /* do the copy */
742         do {
743 #if (defined(DLL) && !defined(NO_SLIDE_REDIR))
744           if (G.redirect_slide) {
745             /* &= w/ wsize unnecessary & wrong if redirect */
746             if ((UINT_D64)d >= wsize)
747               return 1;         /* invalid compressed data */
748             e = (unsigned)(wsize - (d > (unsigned)w ? (UINT_D64)d : w));
749           }
750           else
751 #endif
752             e = (unsigned)(wsize -
753                            ((d &= (unsigned)(wsize-1)) > (unsigned)w ?
754                             (UINT_D64)d : w));
755           if ((UINT_D64)e > n) e = (unsigned)n;
756           n -= e;
757 #ifndef NOMEMCPY
758           if ((unsigned)w - d >= e)
759           /* (this test assumes unsigned comparison) */
760           {
761             memcpy(redirSlide + (unsigned)w, redirSlide + d, e);
762             w += e;
763             d += e;
764           }
765           else                  /* do it slowly to avoid memcpy() overlap */
766 #endif /* !NOMEMCPY */
767             do {
768               redirSlide[w++] = redirSlide[d++];
769             } while (--e);
770           if (w == wsize)
771           {
772             if ((retval = FLUSH(w)) != 0) goto cleanup_and_exit;
773             w = 0;
774           }
775         } while (n);
776         break;
777       }
778 
779       if (e == 31)              /* it's the EOB signal */
780       {
781         /* sorry for this goto, but we have to exit two loops at once */
782         goto cleanup_decode;
783       }
784 
785       if (IS_INVALID_CODE(e))
786         return 1;
787 
788       e &= 31;
789       NEEDBITS(e)
790       t = t->v.t + ((unsigned)b & mask_bits[e]);
791     }
792   }
793 cleanup_decode:
794 
795   /* restore the globals from the locals */
796   G.wp = (unsigned)w;             /* restore global window pointer */
797   G.bb = b;                       /* restore global bit buffer */
798   G.bk = k;
799 
800 
801 cleanup_and_exit:
802   /* done */
803   return retval;
804 }
805 
806 #endif /* ASM_INFLATECODES */
807 
808 
809 
810 static int inflate_stored(__G)
811      __GDEF
812 /* "decompress" an inflated type 0 (stored) block. */
813 {
814   UINT_D64 w;           /* current window position (deflate64: up to 64k!) */
815   unsigned n;           /* number of bytes in block */
816   register ulg b;       /* bit buffer */
817   register unsigned k;  /* number of bits in bit buffer */
818   int retval = 0;       /* error code returned: initialized to "no error" */
819 
820 
821   /* make local copies of globals */
822   Trace((stderr, "\nstored block"));
823   b = G.bb;                       /* initialize bit buffer */
824   k = G.bk;
825   w = G.wp;                       /* initialize window position */
826 
827 
828   /* go to byte boundary */
829   n = k & 7;
830   DUMPBITS(n);
831 
832 
833   /* get the length and its complement */
834   NEEDBITS(16)
835   n = ((unsigned)b & 0xffff);
836   DUMPBITS(16)
837   NEEDBITS(16)
838   if (n != (unsigned)((~b) & 0xffff))
839     return 1;                   /* error in compressed data */
840   DUMPBITS(16)
841 
842 
843   /* read and output the compressed data */
844   while (n--)
845   {
846     NEEDBITS(8)
847     redirSlide[w++] = (uch)b;
848     if (w == wsize)
849     {
850       if ((retval = FLUSH(w)) != 0) goto cleanup_and_exit;
851       w = 0;
852     }
853     DUMPBITS(8)
854   }
855 
856 
857   /* restore the globals from the locals */
858   G.wp = (unsigned)w;             /* restore global window pointer */
859   G.bb = b;                       /* restore global bit buffer */
860   G.bk = k;
861 
862 cleanup_and_exit:
863   return retval;
864 }
865 
866 
867 /* Globals for literal tables (built once) */
868 /* Moved to globals.h                      */
869 #if 0
870 struct huft *fixed_tl = (struct huft *)NULL;
871 struct huft *fixed_td;
872 int fixed_bl, fixed_bd;
873 #endif
874 
875 static int inflate_fixed(__G)
876      __GDEF
877 /* decompress an inflated type 1 (fixed Huffman codes) block.  We should
878    either replace this with a custom decoder, or at least precompute the
879    Huffman tables. */
880 {
881   /* if first time, set up tables for fixed blocks */
882   Trace((stderr, "\nliteral block"));
883   if (G.fixed_tl == (struct huft *)NULL)
884   {
885     int i;                /* temporary variable */
886     unsigned l[288];      /* length list for huft_build */
887 
888     /* literal table */
889     for (i = 0; i < 144; i++)
890       l[i] = 8;
891     for (; i < 256; i++)
892       l[i] = 9;
893     for (; i < 280; i++)
894       l[i] = 7;
895     for (; i < 288; i++)          /* make a complete, but wrong code set */
896       l[i] = 8;
897     G.fixed_bl = 7;
898 #ifdef USE_DEFLATE64
899     if ((i = huft_build(__G__ l, 288, 257, G.cplens, G.cplext,
900                         &G.fixed_tl, &G.fixed_bl)) != 0)
901 #else
902     if ((i = huft_build(__G__ l, 288, 257, cplens, cplext,
903                         &G.fixed_tl, &G.fixed_bl)) != 0)
904 #endif
905     {
906       G.fixed_tl = (struct huft *)NULL;
907       return i;
908     }
909 
910     /* distance table */
911     for (i = 0; i < MAXDISTS; i++)      /* make an incomplete code set */
912       l[i] = 5;
913     G.fixed_bd = 5;
914 #ifdef USE_DEFLATE64
915     if ((i = huft_build(__G__ l, MAXDISTS, 0, cpdist, G.cpdext,
916                         &G.fixed_td, &G.fixed_bd)) > 1)
917 #else
918     if ((i = huft_build(__G__ l, MAXDISTS, 0, cpdist, cpdext,
919                         &G.fixed_td, &G.fixed_bd)) > 1)
920 #endif
921     {
922       huft_free(G.fixed_tl);
923       G.fixed_td = G.fixed_tl = (struct huft *)NULL;
924       return i;
925     }
926   }
927 
928   /* decompress until an end-of-block code */
929   return inflate_codes(__G__ G.fixed_tl, G.fixed_td,
930                              G.fixed_bl, G.fixed_bd);
931 }
932 
933 
934 
935 static int inflate_dynamic(__G)
936   __GDEF
937 /* decompress an inflated type 2 (dynamic Huffman codes) block. */
938 {
939   int i;                /* temporary variables */
940   unsigned j;
941   unsigned l;           /* last length */
942   unsigned m;           /* mask for bit lengths table */
943   unsigned n;           /* number of lengths to get */
944   struct huft *tl;      /* literal/length code table */
945   struct huft *td;      /* distance code table */
946   int bl;               /* lookup bits for tl */
947   int bd;               /* lookup bits for td */
948   unsigned nb;          /* number of bit length codes */
949   unsigned nl;          /* number of literal/length codes */
950   unsigned nd;          /* number of distance codes */
951   unsigned ll[MAXLITLENS+MAXDISTS]; /* lit./length and distance code lengths */
952   register ulg b;       /* bit buffer */
953   register unsigned k;  /* number of bits in bit buffer */
954   int retval = 0;       /* error code returned: initialized to "no error" */
955 
956 
957   /* make local bit buffer */
958   Trace((stderr, "\ndynamic block"));
959   b = G.bb;
960   k = G.bk;
961 
962 
963   /* read in table lengths */
964   NEEDBITS(5)
965   nl = 257 + ((unsigned)b & 0x1f);      /* number of literal/length codes */
966   DUMPBITS(5)
967   NEEDBITS(5)
968   nd = 1 + ((unsigned)b & 0x1f);        /* number of distance codes */
969   DUMPBITS(5)
970   NEEDBITS(4)
971   nb = 4 + ((unsigned)b & 0xf);         /* number of bit length codes */
972   DUMPBITS(4)
973   if (nl > MAXLITLENS || nd > MAXDISTS)
974     return 1;                   /* bad lengths */
975 
976 
977   /* read in bit-length-code lengths */
978   for (j = 0; j < nb; j++)
979   {
980     NEEDBITS(3)
981     ll[border[j]] = (unsigned)b & 7;
982     DUMPBITS(3)
983   }
984   for (; j < 19; j++)
985     ll[border[j]] = 0;
986 
987 
988   /* build decoding table for trees--single level, 7 bit lookup */
989   bl = 7;
990   retval = huft_build(__G__ ll, 19, 19, NULL, NULL, &tl, &bl);
991   if (bl == 0)                  /* no bit lengths */
992     retval = 1;
993   if (retval)
994   {
995     if (retval == 1)
996       huft_free(tl);
997     return retval;              /* incomplete code set */
998   }
999 
1000 
1001   /* read in literal and distance code lengths */
1002   n = nl + nd;
1003   m = mask_bits[bl];
1004   i = l = 0;
1005   while ((unsigned)i < n)
1006   {
1007     NEEDBITS((unsigned)bl)
1008     j = (td = tl + ((unsigned)b & m))->b;
1009     DUMPBITS(j)
1010     j = td->v.n;
1011     if (j < 16)                 /* length of code in bits (0..15) */
1012       ll[i++] = l = j;          /* save last length in l */
1013     else if (j == 16)           /* repeat last length 3 to 6 times */
1014     {
1015       NEEDBITS(2)
1016       j = 3 + ((unsigned)b & 3);
1017       DUMPBITS(2)
1018       if ((unsigned)i + j > n)
1019         return 1;
1020       while (j--)
1021         ll[i++] = l;
1022     }
1023     else if (j == 17)           /* 3 to 10 zero length codes */
1024     {
1025       NEEDBITS(3)
1026       j = 3 + ((unsigned)b & 7);
1027       DUMPBITS(3)
1028       if ((unsigned)i + j > n)
1029         return 1;
1030       while (j--)
1031         ll[i++] = 0;
1032       l = 0;
1033     }
1034     else                        /* j == 18: 11 to 138 zero length codes */
1035     {
1036       NEEDBITS(7)
1037       j = 11 + ((unsigned)b & 0x7f);
1038       DUMPBITS(7)
1039       if ((unsigned)i + j > n)
1040         return 1;
1041       while (j--)
1042         ll[i++] = 0;
1043       l = 0;
1044     }
1045   }
1046 
1047 
1048   /* free decoding table for trees */
1049   huft_free(tl);
1050 
1051 
1052   /* restore the global bit buffer */
1053   G.bb = b;
1054   G.bk = k;
1055 
1056 
1057   /* build the decoding tables for literal/length and distance codes */
1058   bl = lbits;
1059 #ifdef USE_DEFLATE64
1060   retval = huft_build(__G__ ll, nl, 257, G.cplens, G.cplext, &tl, &bl);
1061 #else
1062   retval = huft_build(__G__ ll, nl, 257, cplens, cplext, &tl, &bl);
1063 #endif
1064   if (bl == 0)                  /* no literals or lengths */
1065     retval = 1;
1066   if (retval)
1067   {
1068     if (retval == 1) {
1069       if (!uO.qflag)
1070         MESSAGE((uch *)"(incomplete l-tree)  ", 21L, 1);
1071       huft_free(tl);
1072     }
1073     return retval;              /* incomplete code set */
1074   }
1075   bd = dbits;
1076 #ifdef USE_DEFLATE64
1077   retval = huft_build(__G__ ll + nl, nd, 0, cpdist, G.cpdext, &td, &bd);
1078 #else
1079   retval = huft_build(__G__ ll + nl, nd, 0, cpdist, cpdext, &td, &bd);
1080 #endif
1081 #ifdef PKZIP_BUG_WORKAROUND
1082   if (retval == 1)
1083     retval = 0;
1084 #endif
1085   if (bd == 0 && nl > 257)    /* lengths but no distances */
1086     retval = 1;
1087   if (retval)
1088   {
1089     if (retval == 1) {
1090       if (!uO.qflag)
1091         MESSAGE((uch *)"(incomplete d-tree)  ", 21L, 1);
1092       huft_free(td);
1093     }
1094     huft_free(tl);
1095     return retval;
1096   }
1097 
1098   /* decompress until an end-of-block code */
1099   retval = inflate_codes(__G__ tl, td, bl, bd);
1100 
1101 cleanup_and_exit:
1102   /* free the decoding tables, return */
1103   huft_free(tl);
1104   huft_free(td);
1105   return retval;
1106 }
1107 
1108 
1109 
1110 static int inflate_block(__G__ e)
1111   __GDEF
1112   int *e;               /* last block flag */
1113 /* decompress an inflated block */
1114 {
1115   unsigned t;           /* block type */
1116   register ulg b;       /* bit buffer */
1117   register unsigned k;  /* number of bits in bit buffer */
1118   int retval = 0;       /* error code returned: initialized to "no error" */
1119 
1120 
1121   /* make local bit buffer */
1122   b = G.bb;
1123   k = G.bk;
1124 
1125 
1126   /* read in last block bit */
1127   NEEDBITS(1)
1128   *e = (int)b & 1;
1129   DUMPBITS(1)
1130 
1131 
1132   /* read in block type */
1133   NEEDBITS(2)
1134   t = (unsigned)b & 3;
1135   DUMPBITS(2)
1136 
1137 
1138   /* restore the global bit buffer */
1139   G.bb = b;
1140   G.bk = k;
1141 
1142 
1143   /* inflate that block type */
1144   if (t == 2)
1145     return inflate_dynamic(__G);
1146   if (t == 0)
1147     return inflate_stored(__G);
1148   if (t == 1)
1149     return inflate_fixed(__G);
1150 
1151 
1152   /* bad block type */
1153   retval = 2;
1154 
1155 cleanup_and_exit:
1156   return retval;
1157 }
1158 
1159 
1160 
1161 int inflate(__G__ is_defl64)
1162     __GDEF
1163     int is_defl64;
1164 /* decompress an inflated entry */
1165 {
1166   int e;                /* last block flag */
1167   int r;                /* result code */
1168 #ifdef DEBUG
1169   unsigned h = 0;       /* maximum struct huft's malloc'ed */
1170 #endif
1171 
1172 #if (defined(DLL) && !defined(NO_SLIDE_REDIR))
1173   if (G.redirect_slide)
1174     wsize = G.redirect_size, redirSlide = G.redirect_buffer;
1175   else
1176     wsize = WSIZE, redirSlide = slide;   /* how they're #defined if !DLL */
1177 #endif
1178 
1179   /* initialize window, bit buffer */
1180   G.wp = 0;
1181   G.bk = 0;
1182   G.bb = 0;
1183 
1184 #ifdef USE_DEFLATE64
1185   if (is_defl64) {
1186     G.cplens = cplens64;
1187     G.cplext = cplext64;
1188     G.cpdext = cpdext64;
1189     G.fixed_tl = G.fixed_tl64;
1190     G.fixed_bl = G.fixed_bl64;
1191     G.fixed_td = G.fixed_td64;
1192     G.fixed_bd = G.fixed_bd64;
1193   } else {
1194     G.cplens = cplens32;
1195     G.cplext = cplext32;
1196     G.cpdext = cpdext32;
1197     G.fixed_tl = G.fixed_tl32;
1198     G.fixed_bl = G.fixed_bl32;
1199     G.fixed_td = G.fixed_td32;
1200     G.fixed_bd = G.fixed_bd32;
1201   }
1202 #else /* !USE_DEFLATE64 */
1203   if (is_defl64) {
1204     /* This should not happen unless UnZip is built from object files
1205      * compiled with inconsistent option settings.  Handle this by
1206      * returning with "bad input" error code.
1207      */
1208     Trace((stderr, "\nThis inflate() cannot handle Deflate64!\n"));
1209     return 2;
1210   }
1211 #endif /* ?USE_DEFLATE64 */
1212 
1213   /* decompress until the last block */
1214   do {
1215 #ifdef DEBUG
1216     G.hufts = 0;
1217 #endif
1218     if ((r = inflate_block(__G__ &e)) != 0)
1219       return r;
1220 #ifdef DEBUG
1221     if (G.hufts > h)
1222       h = G.hufts;
1223 #endif
1224   } while (!e);
1225 
1226   Trace((stderr, "\n%u bytes in Huffman tables (%u/entry)\n",
1227          h * (unsigned)sizeof(struct huft), (unsigned)sizeof(struct huft)));
1228 
1229 #ifdef USE_DEFLATE64
1230   if (is_defl64) {
1231     G.fixed_tl64 = G.fixed_tl;
1232     G.fixed_bl64 = G.fixed_bl;
1233     G.fixed_td64 = G.fixed_td;
1234     G.fixed_bd64 = G.fixed_bd;
1235   } else {
1236     G.fixed_tl32 = G.fixed_tl;
1237     G.fixed_bl32 = G.fixed_bl;
1238     G.fixed_td32 = G.fixed_td;
1239     G.fixed_bd32 = G.fixed_bd;
1240   }
1241 #endif
1242 
1243   /* flush out redirSlide and return (success, unless final FLUSH failed) */
1244   return (FLUSH(G.wp));
1245 }
1246 
1247 
1248 
1249 int inflate_free(__G)
1250     __GDEF
1251 {
1252   if (G.fixed_tl != (struct huft *)NULL)
1253   {
1254     huft_free(G.fixed_td);
1255     huft_free(G.fixed_tl);
1256     G.fixed_td = G.fixed_tl = (struct huft *)NULL;
1257   }
1258   return 0;
1259 }
1260 
1261 #endif /* ?USE_ZLIB */
1262 
1263 
1264 /*
1265  * GRR:  moved huft_build() and huft_free() down here; used by explode()
1266  *       and fUnZip regardless of whether USE_ZLIB defined or not
1267  */
1268 
1269 
1270 /* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
1271 #define BMAX 16         /* maximum bit length of any code (16 for explode) */
1272 #define N_MAX 288       /* maximum number of codes in any set */
1273 
1274 
1275 int huft_build(__G__ b, n, s, d, e, t, m)
1276   __GDEF
1277   ZCONST unsigned *b;   /* code lengths in bits (all assumed <= BMAX) */
1278   unsigned n;           /* number of codes (assumed <= N_MAX) */
1279   unsigned s;           /* number of simple-valued codes (0..s-1) */
1280   ZCONST ush *d;        /* list of base values for non-simple codes */
1281   ZCONST uch *e;        /* list of extra bits for non-simple codes */
1282   struct huft **t;      /* result: starting table */
1283   int *m;               /* maximum lookup bits, returns actual */
1284 /* Given a list of code lengths and a maximum table size, make a set of
1285    tables to decode that set of codes.  Return zero on success, one if
1286    the given code set is incomplete (the tables are still built in this
1287    case), two if the input is invalid (all zero length codes or an
1288    oversubscribed set of lengths), and three if not enough memory.
1289    The code with value 256 is special, and the tables are constructed
1290    so that no bits beyond that code are fetched when that code is
1291    decoded. */
1292 {
1293   unsigned a;                   /* counter for codes of length k */
1294   unsigned c[BMAX+1];           /* bit length count table */
1295   unsigned el;                  /* length of EOB code (value 256) */
1296   unsigned f;                   /* i repeats in table every f entries */
1297   int g;                        /* maximum code length */
1298   int h;                        /* table level */
1299   register unsigned i;          /* counter, current code */
1300   register unsigned j;          /* counter */
1301   register int k;               /* number of bits in current code */
1302   int lx[BMAX+1];               /* memory for l[-1..BMAX-1] */
1303   int *l = lx+1;                /* stack of bits per table */
1304   register unsigned *p;         /* pointer into c[], b[], or v[] */
1305   register struct huft *q;      /* points to current table */
1306   struct huft r;                /* table entry for structure assignment */
1307   struct huft *u[BMAX];         /* table stack */
1308   unsigned v[N_MAX];            /* values in order of bit length */
1309   register int w;               /* bits before this table == (l * h) */
1310   unsigned x[BMAX+1];           /* bit offsets, then code stack */
1311   unsigned *xp;                 /* pointer into x */
1312   int y;                        /* number of dummy codes added */
1313   unsigned z;                   /* number of entries in current table */
1314 
1315 
1316   /* Generate counts for each bit length */
1317   el = n > 256 ? b[256] : BMAX; /* set length of EOB code, if any */
1318   memzero((char *)c, sizeof(c));
1319   p = (unsigned *)b;  i = n;
1320   do {
1321     c[*p]++; p++;               /* assume all entries <= BMAX */
1322   } while (--i);
1323   if (c[0] == n)                /* null input--all zero length codes */
1324   {
1325     *t = (struct huft *)NULL;
1326     *m = 0;
1327     return 0;
1328   }
1329 
1330 
1331   /* Find minimum and maximum length, bound *m by those */
1332   for (j = 1; j <= BMAX; j++)
1333     if (c[j])
1334       break;
1335   k = j;                        /* minimum code length */
1336   if ((unsigned)*m < j)
1337     *m = j;
1338   for (i = BMAX; i; i--)
1339     if (c[i])
1340       break;
1341   g = i;                        /* maximum code length */
1342   if ((unsigned)*m > i)
1343     *m = i;
1344 
1345 
1346   /* Adjust last length count to fill out codes, if needed */
1347   for (y = 1 << j; j < i; j++, y <<= 1)
1348     if ((y -= c[j]) < 0)
1349       return 2;                 /* bad input: more codes than bits */
1350   if ((y -= c[i]) < 0)
1351     return 2;
1352   c[i] += y;
1353 
1354 
1355   /* Generate starting offsets into the value table for each length */
1356   x[1] = j = 0;
1357   p = c + 1;  xp = x + 2;
1358   while (--i) {                 /* note that i == g from above */
1359     *xp++ = (j += *p++);
1360   }
1361 
1362 
1363   /* Make a table of values in order of bit lengths */
1364   memzero((char *)v, sizeof(v));
1365   p = (unsigned *)b;  i = 0;
1366   do {
1367     if ((j = *p++) != 0)
1368       v[x[j]++] = i;
1369   } while (++i < n);
1370   n = x[g];                     /* set n to length of v */
1371 
1372 
1373   /* Generate the Huffman codes and for each, make the table entries */
1374   x[0] = i = 0;                 /* first Huffman code is zero */
1375   p = v;                        /* grab values in bit order */
1376   h = -1;                       /* no tables yet--level -1 */
1377   w = l[-1] = 0;                /* no bits decoded yet */
1378   u[0] = (struct huft *)NULL;   /* just to keep compilers happy */
1379   q = (struct huft *)NULL;      /* ditto */
1380   z = 0;                        /* ditto */
1381 
1382   /* go through the bit lengths (k already is bits in shortest code) */
1383   for (; k <= g; k++)
1384   {
1385     a = c[k];
1386     while (a--)
1387     {
1388       /* here i is the Huffman code of length k bits for value *p */
1389       /* make tables up to required level */
1390       while (k > w + l[h])
1391       {
1392         w += l[h++];            /* add bits already decoded */
1393 
1394         /* compute minimum size table less than or equal to *m bits */
1395         z = (z = g - w) > (unsigned)*m ? *m : z;        /* upper limit */
1396         if ((f = 1 << (j = k - w)) > a + 1)     /* try a k-w bit table */
1397         {                       /* too few codes for k-w bit table */
1398           f -= a + 1;           /* deduct codes from patterns left */
1399           xp = c + k;
1400           while (++j < z)       /* try smaller tables up to z bits */
1401           {
1402             if ((f <<= 1) <= *++xp)
1403               break;            /* enough codes to use up j bits */
1404             f -= *xp;           /* else deduct codes from patterns */
1405           }
1406         }
1407         if ((unsigned)w + j > el && (unsigned)w < el)
1408           j = el - w;           /* make EOB code end at table */
1409         z = 1 << j;             /* table entries for j-bit table */
1410         l[h] = j;               /* set table size in stack */
1411 
1412         /* allocate and link in new table */
1413         if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
1414             (struct huft *)NULL)
1415         {
1416           if (h)
1417             huft_free(u[0]);
1418           return 3;             /* not enough memory */
1419         }
1420 #ifdef DEBUG
1421         G.hufts += z + 1;         /* track memory usage */
1422 #endif
1423         *t = q + 1;             /* link to list for huft_free() */
1424         *(t = &(q->v.t)) = (struct huft *)NULL;
1425         u[h] = ++q;             /* table starts after link */
1426 
1427         /* connect to last table, if there is one */
1428         if (h)
1429         {
1430           x[h] = i;             /* save pattern for backing up */
1431           r.b = (uch)l[h-1];    /* bits to dump before this table */
1432           r.e = (uch)(32 + j);  /* bits in this table */
1433           r.v.t = q;            /* pointer to this table */
1434           j = (i & ((1 << w) - 1)) >> (w - l[h-1]);
1435           u[h-1][j] = r;        /* connect to last table */
1436         }
1437       }
1438 
1439       /* set up table entry in r */
1440       r.b = (uch)(k - w);
1441       if (p >= v + n)
1442         r.e = INVALID_CODE;     /* out of values--invalid code */
1443       else if (*p < s)
1444       {
1445         r.e = (uch)(*p < 256 ? 32 : 31);  /* 256 is end-of-block code */
1446         r.v.n = (ush)*p++;                /* simple code is just the value */
1447       }
1448       else
1449       {
1450         r.e = e[*p - s];        /* non-simple--look up in lists */
1451         r.v.n = d[*p++ - s];
1452       }
1453 
1454       /* fill code-like entries with r */
1455       f = 1 << (k - w);
1456       for (j = i >> w; j < z; j += f)
1457         q[j] = r;
1458 
1459       /* backwards increment the k-bit code i */
1460       for (j = 1 << (k - 1); i & j; j >>= 1)
1461         i ^= j;
1462       i ^= j;
1463 
1464       /* backup over finished tables */
1465       while ((i & ((1 << w) - 1)) != x[h])
1466         w -= l[--h];            /* don't need to update q */
1467     }
1468   }
1469 
1470 
1471   /* return actual size of base table */
1472   *m = l[0];
1473 
1474 
1475   /* Return true (1) if we were given an incomplete table */
1476   return y != 0 && g != 1;
1477 }
1478 
1479 
1480 
1481 int huft_free(t)
1482 struct huft *t;         /* table to free */
1483 /* Free the malloc'ed tables built by huft_build(), which makes a linked
1484    list of the tables it made, with the links in a dummy first entry of
1485    each table. */
1486 {
1487   register struct huft *p, *q;
1488 
1489 
1490   /* Go through linked list, freeing from the malloced (t[-1]) address. */
1491   p = t;
1492   while (p != (struct huft *)NULL)
1493   {
1494     q = (--p)->v.t;
1495     free((zvoid *)p);
1496     p = q;
1497   }
1498   return 0;
1499 }
1500