--- e/arch/x86/boot/compressed/Makefile	2007-12-18 21:55:57.000000000 +0000
+++ e/arch/x86/boot/compressed/Makefile	2007-12-27 09:13:47.000000000 +0000
@@ -4,7 +4,7 @@
 # create a compressed vmlinux image from the original vmlinux
 #
-targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o
+targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -50,6 +50,23 @@
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
 endif
+
+ifdef CONFIG_RELOCATABLE
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE
+	$(call if_changed,bzip2)
+else
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,bzip2)
+endif
+
+ifdef CONFIG_RELOCATABLE
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE
+	$(call if_changed,lzma)
+else
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzma)
+endif
+
 LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
 
 else
@@ -55,6 +73,12 @@
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
 
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,bzip2)
+
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzma)
+
 LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
 
 endif
@@ -65,6 +76,9 @@
 LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
 
 endif
 
+suffix_$(CONFIG_KERNEL_GZIP)  = gz
+suffix_$(CONFIG_KERNEL_BZIP2) = bz2
+suffix_$(CONFIG_KERNEL_LZMA)  = lzma
 
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
 	$(call if_changed,ld)
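
The vmlinux.bin.bz2/.lzma rules above only work together with cmd_bzip2 and cmd_lzma (defined in scripts/Makefile.lib at the end of this patch), which append the uncompressed size as a 4-byte integer after the compressed payload, so the boot stub can size its output buffer without parsing the stream. A minimal sketch of reading that trailer back; the helper name is mine, not part of the patch, and the byte order assumes the usual x86 build host (perl's pack("i", ...) emits a native-endian 32-bit value):

#include <stdint.h>

/* Read the 32-bit uncompressed-size trailer that cmd_bzip2/cmd_lzma
 * append after the compressed payload.  `end` points one byte past the
 * last byte of the file; on x86 the value is little-endian. */
static uint32_t trailer_size(const unsigned char *end)
{
	return (uint32_t)end[-4] |
	       ((uint32_t)end[-3] << 8) |
	       ((uint32_t)end[-2] << 16) |
	       ((uint32_t)end[-1] << 24);
}
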
--- e/arch/x86/boot/compressed/misc.c	2007-12-27 10:02:06.000000000 +0000
+++ e/arch/x86/boot/compressed/misc.c	2007-12-27 11:58:57.000000000 +0000
@@ -104,6 +104,7 @@
 #define OF(args)  args
 #define STATIC static
+#define MYSTATIC static
 
 #undef memset
 #undef memcpy
@@ -121,9 +122,12 @@
 * always be larger than our output buffer.
 */
+#ifdef CONFIG_KERNEL_GZIP
 static uch *inbuf;	/* input buffer */
+#endif
 static uch *window;	/* Sliding window buffer, (and final output buffer) */
 
+#ifdef CONFIG_KERNEL_GZIP
 static unsigned insize;  /* valid bytes in inbuf */
 static unsigned inptr;   /* index of next byte to be processed in inbuf */
 static unsigned outcnt;  /* bytes in output buffer */
@@ -158,9 +162,14 @@
 static int  fill_inbuf(void);
 static void flush_window(void);
+#endif
+
 static void error(char *m);
+
+#ifdef CONFIG_KERNEL_GZIP
 static void gzip_mark(void **);
 static void gzip_release(void **);
+#endif
 
 /*
 * This is set up by the setup-routine at boot-time
 */
@@ -181,7 +190,9 @@
 static void *malloc(int size);
 static void free(void *where);
+#if (defined CONFIG_KERNEL_GZIP || defined CONFIG_KERNEL_BZIP2)
 static void *memset(void *s, int c, unsigned n);
+#endif
 static void *memcpy(void *dest, const void *src, unsigned n);
 
 static void putstr(const char *);
@@ -189,8 +200,12 @@
 #ifdef CONFIG_X86_64
 #define HEAP_SIZE	0x7000
 #else
+#if (defined CONFIG_KERNEL_BZIP2 || defined CONFIG_KERNEL_LZMA)
+#define HEAP_SIZE	0x400000
+#else
 #define HEAP_SIZE	0x4000
 #endif
+#endif
 
 static char *vidmem = (char *)0xb8000;
 static int vidport;
@@ -199,7 +214,29 @@
 void *xquad_portio;
 #endif
 
+#if (defined CONFIG_KERNEL_BZIP2 || defined CONFIG_KERNEL_LZMA)
+
+#define large_malloc malloc
+#define large_free free
+
+#ifdef current
+#undef current
+#endif
+
+#define INCLUDED
+#endif
+
+#ifdef CONFIG_KERNEL_GZIP
 #include "../../../../lib/inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
 
 static void *malloc(int size)
 {
@@ -223,6 +260,7 @@
 {	/* Don't care */
 }
 
+#ifdef CONFIG_KERNEL_GZIP
 static void gzip_mark(void **ptr)
 {
 	*ptr = (void *) free_mem_ptr;
@@ -232,6 +270,7 @@
 {
 	free_mem_ptr = (unsigned long) *ptr;
 }
+#endif
 
 static void scroll(void)
 {
@@ -279,6 +318,7 @@
 	outb_p(0xff & (pos >> 1), vidport+1);
 }
 
+#if (defined CONFIG_KERNEL_GZIP || defined CONFIG_KERNEL_BZIP2)
 static void* memset(void* s, int c, unsigned n)
 {
 	int i;
@@ -287,6 +327,7 @@
 	for (i=0;i. (An older
+	  version of this functionality (bzip2 only), for 2.4, was
+	  supplied by Christian Ludwig)
+
+	  High compression options are mostly useful for users who
+	  are low on disk space (embedded systems), but for whom RAM
+	  size matters less.
+
+	  If in doubt, select 'gzip'.
+
+config KERNEL_GZIP
+	bool "Gzip"
+	help
+	  The old and tried gzip compression. Its compression ratio is
+	  the poorest among the 3 choices; however its speed (both
+	  compression and decompression) is the fastest.
+
+config KERNEL_BZIP2
+	bool "Bzip2"
+	help
+	  Its compression ratio and speed are intermediate.
+	  Decompression speed is the slowest among the 3.
+	  The kernel size is about 10 per cent smaller with bzip2,
+	  in comparison to gzip.
+	  Bzip2 uses a large amount of memory. For modern kernels
+	  you will need at least 8MB RAM or more for booting.
+
+config KERNEL_LZMA
+	bool "LZMA"
+	help
+	  The most recent compression algorithm.
+	  Its compression ratio is the best, its decompression speed is
+	  between the other 2, and its compression speed is the slowest.
+	  The kernel size is about 33 per cent smaller with lzma,
+	  in comparison to gzip.
+
+endchoice
+
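
The HEAP_SIZE bump from 0x4000 to 0x400000 above is needed because large_malloc is simply #defined to the boot stub's malloc, and bunzip2 allocates its intermediate buffer from that heap: up to 900000 * sizeof(int), roughly 3.4 MB, for a stream written with 900k blocks. For reference, a sketch of the kind of bump allocator misc.c provides (illustrative only; the names free_mem_ptr/free_mem_end_ptr follow misc.c, but the real version reports failure through error() and its alignment is arch-specific):

/* Bump allocator over the [free_mem_ptr, free_mem_end_ptr) window that
 * head_*.S hands to the boot stub.  There is no real free(); memory is
 * only reclaimed by resetting free_mem_ptr (cf. gzip_mark/gzip_release). */
static unsigned long free_mem_ptr;	/* set up by the setup code */
static unsigned long free_mem_end_ptr;

static void *bump_malloc(int size)
{
	void *p;

	if (size < 0)
		return 0;
	free_mem_ptr = (free_mem_ptr + 3) & ~3UL;	/* 4-byte align */
	p = (void *)free_mem_ptr;
	free_mem_ptr += size;
	if (free_mem_end_ptr && free_mem_ptr >= free_mem_end_ptr)
		return 0;				/* heap exhausted */
	return p;
}
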
 config SWAP
 	bool "Support for paging of anonymous memory (swap)"
 	depends on MMU && BLOCK
diff -urN linux-2.6.23.12/init/do_mounts_rd.c linux-2.6.23.12udpcast/init/do_mounts_rd.c
--- linux-2.6.23.12/init/do_mounts_rd.c	2007-12-18 21:55:57.000000000 +0000
+++ linux-2.6.23.12udpcast/init/do_mounts_rd.c	2007-12-27 10:58:08.000000000 +0000
@@ -11,6 +11,16 @@
 #include "do_mounts.h"
 
+#ifdef CONFIG_RD_BZIP2
+#include
+#undef STATIC
+#endif
+
+#ifdef CONFIG_RD_LZMA
+#include
+#undef STATIC
+#endif
+
 #define BUILD_CRAMDISK
 
 int __initdata rd_prompt = 1;	/* 1 = prompt for RAM disk, 0 = don't prompt */
@@ -30,7 +30,15 @@
 }
 __setup("ramdisk_start=", ramdisk_start_setup);
 
+#ifdef CONFIG_RD_GZIP
 static int __init crd_load(int in_fd, int out_fd);
+#endif
+#ifdef CONFIG_RD_BZIP2
+static int __init crd_load_bzip2(int in_fd, int out_fd);
+#endif
+#ifdef CONFIG_RD_LZMA
+static int __init crd_load_lzma(int in_fd, int out_fd);
+#endif
 
 /*
 * This routine tries to find a RAM disk image to load, and returns the
@@ -46,7 +54,7 @@
 * gzip
 */
 static int __init
-identify_ramdisk_image(int fd, int start_block)
+identify_ramdisk_image(int fd, int start_block, int *ztype)
 {
 	const int size = 512;
 	struct minix_super_block *minixsb;
@@ -72,6 +80,7 @@
 	sys_lseek(fd, start_block * BLOCK_SIZE, 0);
 	sys_read(fd, buf, size);
 
+#ifdef CONFIG_RD_GZIP
 	/*
 	 * If it matches the gzip magic numbers, return -1
 	 */
@@ -79,9 +88,40 @@
 		printk(KERN_NOTICE
 		       "RAMDISK: Compressed image found at block %d\n",
 		       start_block);
+		*ztype = 0;
+		nblocks = 0;
+		goto done;
+	}
+#endif
+
+#ifdef CONFIG_RD_BZIP2
+	/*
+	 * If it matches the bzip2 magic numbers, it is a bzip2 image
+	 */
+	if (buf[0] == 0x42 && (buf[1] == 0x5a)) {
+		printk(KERN_NOTICE
+		       "RAMDISK: Bzipped image found at block %d\n",
+		       start_block);
+		*ztype = 1;
+		nblocks = 0;
+		goto done;
+	}
+#endif
+
+#ifdef CONFIG_RD_LZMA
+	/*
+	 * If it matches the lzma magic numbers, it is an lzma image
+	 */
+	if (buf[0] == 0x5d && (buf[1] == 0x00)) {
+		printk(KERN_NOTICE
+		       "RAMDISK: Lzma image found at block %d\n",
+		       start_block);
+		*ztype = 2;
+		nblocks = 0;
+		goto done;
+	}
+#endif
+
 	/* romfs is at block zero too */
 	if (romfsb->word0 == ROMSB_WORD0 &&
@@ -145,6 +185,7 @@
 	int nblocks, i, disk;
 	char *buf = NULL;
 	unsigned short rotate = 0;
+	int ztype = -1;
 #if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
 	char rotator[4] = { '|' , '/' , '-' , '\\' };
 #endif
@@ -157,14 +198,38 @@
 	if (in_fd < 0)
 		goto noclose_input;
 
-	nblocks = identify_ramdisk_image(in_fd, rd_image_start);
+	nblocks = identify_ramdisk_image(in_fd, rd_image_start, &ztype);
 
 	if (nblocks < 0)
 		goto done;
 
 	if (nblocks == 0) {
 #ifdef BUILD_CRAMDISK
-		if (crd_load(in_fd, out_fd) == 0)
-			goto successful_load;
+		switch (ztype) {
+
+#ifdef CONFIG_RD_GZIP
+		case 0:
+			if (crd_load(in_fd, out_fd) == 0)
+				goto successful_load;
+			break;
+#endif
+
+#ifdef CONFIG_RD_BZIP2
+		case 1:
+			if (crd_load_bzip2(in_fd, out_fd) == 0)
+				goto successful_load;
+			break;
+#endif
+
+#ifdef CONFIG_RD_LZMA
+		case 2:
+			if (crd_load_lzma(in_fd, out_fd) == 0)
+				goto successful_load;
+			break;
+#endif
+
+		default:
+			break;
+		}
 #else
 		printk(KERN_NOTICE
 		       "RAMDISK: Kernel does not support compressed "
@@ -269,6 +334,7 @@
 
 #ifdef BUILD_CRAMDISK
 
+#ifdef CONFIG_RD_GZIP
 /*
 * gzip declarations
 */
@@ -296,8 +362,11 @@
 static int exit_code;
 static int unzip_error;
 static long bytes_out;
+#endif
+
 static int crd_infd, crd_outfd;
 
+#ifdef CONFIG_RD_GZIP
 #define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
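
For clarity, the magic-number dispatch that identify_ramdisk_image() implements above can be restated standalone as follows (a sketch, not part of the patch; the ztype encoding matches the patch, and the third-byte 'h' test for bzip2 is slightly stricter than the patch's two-byte check):

/* Classify the first 512 bytes of a ramdisk image.
 * Returns 0 = gzip, 1 = bzip2, 2 = lzma, -1 = not compressed. */
static int guess_ztype(const unsigned char *buf)
{
	if (buf[0] == 037 && (buf[1] == 0213 || buf[1] == 0236))
		return 0;	/* gzip: \037\213 (or old \037\236) */
	if (buf[0] == 0x42 && buf[1] == 0x5a && buf[2] == 0x68)
		return 1;	/* bzip2: "BZh" */
	if (buf[0] == 0x5d && buf[1] == 0x00)
		return 2;	/* lzma-alone: props byte 0x5d, dict size LE */
	return -1;
}
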
 
 /* Diagnostic functions (stubbed out) */
@@ -359,7 +438,22 @@
 	return inbuf[0];
 }
+#endif
 
+#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
+static int __init compr_fill(void *buf, unsigned int len)
+{
+	int r = sys_read(crd_infd, buf, len);
+	if (r < 0) {
+		printk(KERN_ERR "RAMDISK: error while reading compressed data\n");
+	} else if (r == 0) {
+		printk(KERN_ERR "RAMDISK: EOF while reading compressed data\n");
+	}
+	return r;
+}
+#endif
+
+#ifdef CONFIG_RD_GZIP
 /* ===========================================================================
 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
 * (Used for the decompressed data only.)
 */
@@ -385,7 +479,24 @@
 	bytes_out += (ulg)outcnt;
 	outcnt = 0;
 }
+#endif
 
+#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
+static int __init compr_flush(void *window, unsigned int outcnt)
+{
+	static int progressDots = 0;
+	int written = sys_write(crd_outfd, window, outcnt);
+	if (written != outcnt) {
+		printk(KERN_ERR "RAMDISK: incomplete write (%d != %d)\n",
+		       written, outcnt);
+	}
+	progressDots = (progressDots + 1) % 10;
+	if (!progressDots)
+		printk(".");
+	return outcnt;
+}
+#endif
+
+#ifdef CONFIG_RD_GZIP
 static void __init error(char *x)
 {
 	printk(KERN_ERR "%s\n", x);
@@ -425,5 +536,43 @@
 	kfree(window);
 	return result;
 }
+#endif
+
+#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
+static int __init crd_load_compr(int in_fd, int out_fd, int size,
+				 int (*deco)(char *, int,
+					     int (*fill)(void *, unsigned int),
+					     int (*flush)(void *, unsigned int),
+					     int *))
+{
+	int result;
+	char *inbuf = kmalloc(size, GFP_KERNEL);
+	crd_infd = in_fd;
+	crd_outfd = out_fd;
+	if (inbuf == NULL) {
+		printk(KERN_ERR "RAMDISK: Couldn't allocate decompression buffer\n");
+		return -1;
+	}
+	result = deco(inbuf, 0, compr_fill, compr_flush, NULL);
+	kfree(inbuf);
+	printk("\n");
+	return result;
+}
+#endif
+
+#ifdef CONFIG_RD_BZIP2
+static int __init crd_load_bzip2(int in_fd, int out_fd)
+{
+	return crd_load_compr(in_fd, out_fd, BZIP2_IOBUF_SIZE, bunzip2);
+}
+#endif
+
+#ifdef CONFIG_RD_LZMA
+static int __init crd_load_lzma(int in_fd, int out_fd)
+{
+	return crd_load_compr(in_fd, out_fd, LZMA_IOBUF_SIZE, unlzma);
+}
+
+#endif
 
 #endif /* BUILD_CRAMDISK */
diff -urN linux-2.6.23.12/init/initramfs.c linux-2.6.23.12udpcast/init/initramfs.c
--- linux-2.6.23.12/init/initramfs.c	2007-12-18 21:55:57.000000000 +0000
+++ linux-2.6.23.12udpcast/init/initramfs.c	2007-12-27 12:52:46.000000000 +0000
@@ -7,6 +7,15 @@
 #include
 #include
 
+/* We need to enable RD_GZIP unconditionally, as the built-in
+ * initramfs is gzip-compressed, alas!
+ * We can only wonder why, though, as the whole kernel (which contains
+ * the built-in initramfs) is gzip (or bzip2) compressed anyway afterwards...
+ */
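
The crd_load_compr()/compr_fill()/compr_flush() trio above fixes the callback contract both decompressors use: input is pulled through fill() and output pushed through flush(), each returning a byte count (negative, or short for flush, meaning error). A self-contained userspace harness for exercising that contract, with illustrative file names; with the patch applied, the marked call is where bunzip2()/unlzma() would slot in:

#include <stdio.h>

static FILE *in_file, *out_file;

static int fill(void *buf, unsigned int len)
{
	return (int)fread(buf, 1, len, in_file);	/* 0 on EOF, like sys_read */
}

static int flush(void *buf, unsigned int len)
{
	return (int)fwrite(buf, 1, len, out_file);	/* short count == error */
}

int main(void)
{
	in_file = fopen("initrd.bz2", "rb");		/* illustrative names */
	out_file = fopen("initrd.img", "wb");
	if (!in_file || !out_file)
		return 1;
	/* with the patch applied one would call, e.g.:
	 *	bunzip2(inbuf, 0, fill, flush, NULL);
	 * here we only demonstrate the callback shapes. */
	fclose(in_file);
	fclose(out_file);
	return 0;
}
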
+#ifndef CONFIG_RD_GZIP
+#define CONFIG_RD_GZIP
+#endif
+
 static __initdata char *message;
 static void __init error(char *x)
 {
@@ -347,11 +356,14 @@
 	return len - count;
 }
 
-static void __init flush_buffer(char *buf, unsigned len)
+
+static int __init flush_buffer(void *bufv, unsigned len)
 {
+	char *buf = (char *) bufv;
 	int written;
+	int origLen = len;
 	if (message)
-		return;
+		return -1;
 	while ((written = write_buffer(buf, len)) < len && !message) {
 		char c = buf[written];
 		if (c == '0') {
@@ -365,8 +377,12 @@
 		} else
 			error("junk in compressed archive");
 	}
+	return origLen;
 }
 
+static unsigned inptr;	/* index of next byte to be processed in inbuf */
+
+#ifdef CONFIG_RD_GZIP
 /*
 * gzip declarations
 */
@@ -388,7 +404,6 @@
 static uch *window;
 
 static unsigned insize;  /* valid bytes in inbuf */
-static unsigned inptr;   /* index of next byte to be processed in inbuf */
 static unsigned outcnt;  /* bytes in output buffer */
 static long bytes_out;
 
@@ -412,6 +427,16 @@
 
 #include "../lib/inflate.c"
 
+#ifdef CONFIG_RD_BZIP2
+#include
+
+#endif
+
+#ifdef CONFIG_RD_LZMA
+#include
+
+#endif
+
 static void __init gzip_mark(void **ptr)
 {
 }
@@ -440,6 +465,7 @@
 	bytes_out += (ulg)outcnt;
 	outcnt = 0;
 }
+#endif
 
 static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
 {
@@ -448,9 +474,11 @@
 	header_buf = malloc(110);
 	symlink_buf = malloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1);
 	name_buf = malloc(N_ALIGN(PATH_MAX));
+#ifdef CONFIG_RD_GZIP
 	window = malloc(WSIZE);
 	if (!window || !header_buf || !symlink_buf || !name_buf)
 		panic("can't allocate buffers");
+#endif
 	state = Start;
 	this_header = 0;
 	message = NULL;
@@ -470,6 +498,7 @@
 			continue;
 		}
 		this_header = 0;
+#ifdef CONFIG_RD_GZIP
 		insize = len;
 		inbuf = buf;
 		inptr = 0;
@@ -477,14 +506,38 @@
 		bytes_out = 0;
 		crc = (ulg)0xffffffffL; /* shift register contents */
 		makecrc();
-		gunzip();
+		if (!gunzip() && message == NULL)
+			goto ok;
+#endif
+
+#ifdef CONFIG_RD_BZIP2
+		message = NULL; /* Zero out message, or else cpio will
+				   think an error has already occurred */
+		if (!bunzip2(buf, len, NULL, flush_buffer, &inptr) &&
+		    message == NULL) {
+			goto ok;
+		}
+#endif
+
+#ifdef CONFIG_RD_LZMA
+		message = NULL; /* Zero out message, or else cpio will
+				   think an error has already occurred */
+		if (!unlzma(buf, len, NULL, flush_buffer, &inptr) &&
+		    message == NULL) {
+			goto ok;
+		}
+#endif
+	ok:
+
 		if (state != Reset)
-			error("junk in gzipped archive");
+			error("junk in compressed archive");
 		this_header = saved_offset + inptr;
 		buf += inptr;
 		len -= inptr;
 	}
+#ifdef CONFIG_RD_GZIP
 	free(window);
+#endif
 	free(name_buf);
 	free(symlink_buf);
 	free(header_buf);
diff -urN linux-2.6.23.12/lib/Makefile linux-2.6.23.12udpcast/lib/Makefile
--- linux-2.6.23.12/lib/Makefile	2007-12-18 21:55:57.000000000 +0000
+++ linux-2.6.23.12udpcast/lib/Makefile	2007-12-27 09:13:47.000000000 +0000
@@ -48,6 +48,10 @@
 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
 
+obj-$(CONFIG_RD_BZIP2) += decompress_bunzip2.o
+obj-$(CONFIG_RD_LZMA) += decompress_unlzma.o
+
 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
 obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
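
The bunzip2 decompressor added below relies on the bzip2 stream framing: an ASCII "BZh" header plus a block-size digit '1'..'9', then one or more blocks introduced by the 48-bit magic 0x314159265359, and an end-of-stream marker 0x177245385090 carrying the whole-file CRC (cf. start_bunzip() and get_next_block() further down). A small reference sketch of the header check, separate from the patch code:

#include <stdint.h>

/* Returns the block size in units of 100k (1..9), or -1 if `p` does not
 * start a bzip2 stream.  get_next_block() then expects the 48-bit block
 * magic 0x314159265359, or 0x177245385090 for the end-of-stream marker. */
static int bz2_block_size_100k(const unsigned char *p, unsigned int len)
{
	if (len < 4 || p[0] != 'B' || p[1] != 'Z' || p[2] != 'h')
		return -1;		/* not a bzip2 stream */
	if (p[3] < '1' || p[3] > '9')
		return -1;		/* bad block-size digit */
	return p[3] - '0';		/* dbuf needs 100000 * this many ints */
}
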
diff -urN linux-2.6.23.12/lib/decompress_bunzip2.c linux-2.6.23.12udpcast/lib/decompress_bunzip2.c
--- linux-2.6.23.12/lib/decompress_bunzip2.c	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.23.12udpcast/lib/decompress_bunzip2.c	2007-12-27 10:56:15.000000000 +0000
@@ -0,0 +1,649 @@
+/* vi: set sw=4 ts=4: */
+/*	Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
+
+	Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
+	which also acknowledges contributions by Mike Burrows, David Wheeler,
+	Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
+	Robert Sedgewick, and Jon L. Bentley.
+
+	This code is licensed under the LGPLv2:
+		LGPL (http://www.gnu.org/copyleft/lgpl.html)
+*/
+
+/*
+	Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
+
+	More efficient reading of Huffman codes, a streamlined read_bunzip()
+	function, and various other tweaks. In (limited) tests, approximately
+	20% faster than bzcat on x86 and about 10% faster on arm.
+
+	Note that about 2/3 of the time is spent in read_bunzip() reversing
+	the Burrows-Wheeler transformation. Much of that time is delay
+	resulting from cache misses.
+
+	I would ask that anyone benefiting from this work, especially those
+	using it in commercial products, consider making a donation to my local
+	non-profit hospice organization in the name of the woman I loved, who
+	passed away Feb. 12, 2003.
+
+		In memory of Toni W. Hagan
+
+		Hospice of Acadiana, Inc.
+		2600 Johnston St., Suite 200
+		Lafayette, LA 70503-3240
+
+		Phone (337) 232-1234 or 1-800-738-2226
+		Fax (337) 232-1297
+
+		http://www.hospiceacadiana.com/
+
+	Manuel
+ */
+
+/*
+	Made it fit for running in the Linux kernel by Alain Knaff (alain@knaff.lu)
+*/
+
+
+#ifndef STATIC
+
+#include
+#include
+#include
+
+#ifdef TEST
+#include "test.h"
+#else
+#include
+#endif
+
+static void __init *large_malloc(size_t size)
+{
+	return vmalloc(size);
+}
+
+static void __init large_free(void *where)
+{
+	vfree(where);
+}
+
+#ifndef TEST
+static void __init *malloc(size_t size)
+{
+	return kmalloc(size, GFP_KERNEL);
+}
+
+static void __init free(void *where)
+{
+	kfree(where);
+}
+
+static void __init error(char *x)
+{
+	printk(KERN_ERR "%s\n", x);
+}
+#endif
+
+#define STATIC /**/
+
+#endif
+
+#ifndef INIT
+#define INIT
+#endif
+
+#include
+
+
+/* Constants for Huffman coding */
+#define MAX_GROUPS		6
+#define GROUP_SIZE		50	/* 64 would have been more efficient */
+#define MAX_HUFCODE_BITS	20	/* Longest Huffman code allowed */
+#define MAX_SYMBOLS		258	/* 256 literals + RUNA + RUNB */
+#define SYMBOL_RUNA		0
+#define SYMBOL_RUNB		1
+
+/* Status return values */
+#define RETVAL_OK			0
+#define RETVAL_LAST_BLOCK		(-1)
+#define RETVAL_NOT_BZIP_DATA		(-2)
+#define RETVAL_UNEXPECTED_INPUT_EOF	(-3)
+#define RETVAL_UNEXPECTED_OUTPUT_EOF	(-4)
+#define RETVAL_DATA_ERROR		(-5)
+#define RETVAL_OUT_OF_MEMORY		(-6)
+#define RETVAL_OBSOLETE_INPUT		(-7)
+
+
+/* This is what we know about each Huffman coding group */
+struct group_data {
+	/* We have an extra slot at the end of limit[] for a sentinel value. */
+	int limit[MAX_HUFCODE_BITS+1], base[MAX_HUFCODE_BITS], permute[MAX_SYMBOLS];
+	int minLen, maxLen;
+};
+
+/* Structure holding all the housekeeping data, including IO buffers and
+   memory that persists between calls to bunzip */
+typedef struct {
+	/* State for interrupting output loop */
+	int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
+	/* I/O tracking data (file handles, buffers, positions, etc.)
*/ + int (*fill)(void*,unsigned int); + int inbufCount,inbufPos /*,outbufPos*/; + unsigned char *inbuf /*,*outbuf*/; + unsigned int inbufBitCount, inbufBits; + /* The CRC values stored in the block header and calculated from the data */ + unsigned int crc32Table[256],headerCRC, totalCRC, writeCRC; + /* Intermediate buffer and its size (in bytes) */ + unsigned int *dbuf, dbufSize; + /* These things are a bit too big to go on the stack */ + unsigned char selectors[32768]; /* nSelectors=15 bits */ + struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */ + int io_error; /* non-zero if we have IO error */ +} bunzip_data; + + +/* Return the next nnn bits of input. All reads from the compressed input + are done through this function. All reads are big endian */ +static unsigned int INIT get_bits(bunzip_data *bd, char bits_wanted) +{ + unsigned int bits=0; + + /* If we need to get more data from the byte buffer, do so. (Loop getting + one byte at a time to enforce endianness and avoid unaligned access.) */ + while (bd->inbufBitCountinbufPos==bd->inbufCount) { + if(bd->io_error) + return 0; + if((bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE)) <= 0) { + bd->io_error=RETVAL_UNEXPECTED_INPUT_EOF; + return 0; + } + bd->inbufPos=0; + } + /* Avoid 32-bit overflow (dump bit buffer to top of output) */ + if(bd->inbufBitCount>=24) { + bits=bd->inbufBits&((1<inbufBitCount)-1); + bits_wanted-=bd->inbufBitCount; + bits<<=bits_wanted; + bd->inbufBitCount=0; + } + /* Grab next 8 bits of input from buffer. */ + bd->inbufBits=(bd->inbufBits<<8)|bd->inbuf[bd->inbufPos++]; + bd->inbufBitCount+=8; + } + /* Calculate result */ + bd->inbufBitCount-=bits_wanted; + bits|=(bd->inbufBits>>bd->inbufBitCount)&((1<dbuf; + dbufSize=bd->dbufSize; + selectors=bd->selectors; + + /* Read in header signature and CRC, then validate signature. + (last block signature means CRC is for whole file, return now) */ + i = get_bits(bd,24); + j = get_bits(bd,24); + bd->headerCRC=get_bits(bd,32); + if ((i == 0x177245) && (j == 0x385090)) return RETVAL_LAST_BLOCK; + if ((i != 0x314159) || (j != 0x265359)) return RETVAL_NOT_BZIP_DATA; + /* We can add support for blockRandomised if anybody complains. There was + some code for this in busybox 1.0.0-pre3, but nobody ever noticed that + it didn't actually work. */ + if(get_bits(bd,1)) return RETVAL_OBSOLETE_INPUT; + if((origPtr=get_bits(bd,24)) > dbufSize) return RETVAL_DATA_ERROR; + /* mapping table: if some byte values are never used (encoding things + like ascii text), the compression code removes the gaps to have fewer + symbols to deal with, and writes a sparse bitfield indicating which + values were present. We make a translation table to convert the symbols + back to the corresponding bytes. */ + t=get_bits(bd, 16); + symTotal=0; + for (i=0;i<16;i++) { + if(t&(1<<(15-i))) { + k=get_bits(bd,16); + for(j=0;j<16;j++) + if(k&(1<<(15-j))) symToByte[symTotal++]=(16*i)+j; + } + } + /* How many different Huffman coding groups does this block use? */ + groupCount=get_bits(bd,3); + if (groupCount<2 || groupCount>MAX_GROUPS) return RETVAL_DATA_ERROR; + /* nSelectors: Every GROUP_SIZE many symbols we select a new Huffman coding + group. Read in the group selector list, which is stored as MTF encoded + bit runs. (MTF=Move To Front, as each value is used it's moved to the + start of the list.) 
*/ + if(!(nSelectors=get_bits(bd, 15))) return RETVAL_DATA_ERROR; + for(i=0; i=groupCount) return RETVAL_DATA_ERROR; + /* Decode MTF to get the next selector */ + uc = mtfSymbol[j]; + for(;j;j--) mtfSymbol[j] = mtfSymbol[j-1]; + mtfSymbol[0]=selectors[i]=uc; + } + /* Read the Huffman coding tables for each group, which code for symTotal + literal symbols, plus two run symbols (RUNA, RUNB) */ + symCount=symTotal+2; + for (j=0; j (MAX_HUFCODE_BITS-1)) + return RETVAL_DATA_ERROR; + /* If first bit is 0, stop. Else second bit indicates whether + to increment or decrement the value. Optimization: grab 2 + bits and unget the second if the first was 0. */ + k = get_bits(bd,2); + if (k < 2) { + bd->inbufBitCount++; + break; + } + /* Add one if second bit 1, else subtract 1. Avoids if/else */ + t+=(((k+1)&2)-1); + } + /* Correct for the initial -1, to get the final symbol length */ + length[i]=t+1; + } + /* Find largest and smallest lengths in this group */ + minLen=maxLen=length[0]; + for(i = 1; i < symCount; i++) { + if(length[i] > maxLen) maxLen = length[i]; + else if(length[i] < minLen) minLen = length[i]; + } + /* Calculate permute[], base[], and limit[] tables from length[]. + * + * permute[] is the lookup table for converting Huffman coded symbols + * into decoded symbols. base[] is the amount to subtract from the + * value of a Huffman symbol of a given length when using permute[]. + * + * limit[] indicates the largest numerical value a symbol with a given + * number of bits can have. This is how the Huffman codes can vary in + * length: each code with a value>limit[length] needs another bit. + */ + hufGroup=bd->groups+j; + hufGroup->minLen = minLen; + hufGroup->maxLen = maxLen; + /* Note that minLen can't be smaller than 1, so we adjust the base + and limit array pointers so we're not always wasting the first + entry. We do this again when using them (during symbol decoding).*/ + base=hufGroup->base-1; + limit=hufGroup->limit-1; + /* Calculate permute[]. Concurently, initialize temp[] and limit[]. */ + pp=0; + for(i=minLen;i<=maxLen;i++) { + temp[i]=limit[i]=0; + for(t=0;tpermute[pp++] = t; + } + /* Count symbols coded for at each bit length */ + for (i=0;ilimit[length] comparison. */ + limit[i]= (pp << (maxLen - i)) - 1; + pp<<=1; + base[i+1]=pp-(t+=temp[i]); + } + limit[maxLen+1] = INT_MAX; /* Sentinal value for reading next sym. */ + limit[maxLen]=pp+temp[maxLen]-1; + base[minLen]=0; + } + /* We've finished reading and digesting the block header. Now read this + block's Huffman coded symbols from the file and undo the Huffman coding + and run length encoding, saving the result into dbuf[dbufCount++]=uc */ + + /* Initialize symbol occurrence counters and symbol Move To Front table */ + for(i=0;i<256;i++) { + byteCount[i] = 0; + mtfSymbol[i]=(unsigned char)i; + } + /* Loop through compressed symbols. */ + runPos=dbufCount=symCount=selector=0; + for(;;) { + /* Determine which Huffman coding group to use. */ + if(!(symCount--)) { + symCount=GROUP_SIZE-1; + if(selector>=nSelectors) return RETVAL_DATA_ERROR; + hufGroup=bd->groups+selectors[selector++]; + base=hufGroup->base-1; + limit=hufGroup->limit-1; + } + /* Read next Huffman-coded symbol. */ + /* Note: It is far cheaper to read maxLen bits and back up than it is + to read minLen bits and then an additional bit at a time, testing + as we go. Because there is a trailing last block (with file CRC), + there is no danger of the overread causing an unexpected EOF for a + valid compressed file. 
As a further optimization, we do the read + inline (falling back to a call to get_bits if the buffer runs + dry). The following (up to got_huff_bits:) is equivalent to + j=get_bits(bd,hufGroup->maxLen); + */ + while (bd->inbufBitCountmaxLen) { + if(bd->inbufPos==bd->inbufCount) { + j = get_bits(bd,hufGroup->maxLen); + goto got_huff_bits; + } + bd->inbufBits=(bd->inbufBits<<8)|bd->inbuf[bd->inbufPos++]; + bd->inbufBitCount+=8; + }; + bd->inbufBitCount-=hufGroup->maxLen; + j = (bd->inbufBits>>bd->inbufBitCount)&((1<maxLen)-1); +got_huff_bits: + /* Figure how how many bits are in next symbol and unget extras */ + i=hufGroup->minLen; + while(j>limit[i]) ++i; + bd->inbufBitCount += (hufGroup->maxLen - i); + /* Huffman decode value to get nextSym (with bounds checking) */ + if ((i > hufGroup->maxLen) + || (((unsigned)(j=(j>>(hufGroup->maxLen-i))-base[i])) + >= MAX_SYMBOLS)) + return RETVAL_DATA_ERROR; + nextSym = hufGroup->permute[j]; + /* We have now decoded the symbol, which indicates either a new literal + byte, or a repeated run of the most recent literal byte. First, + check if nextSym indicates a repeated run, and if so loop collecting + how many times to repeat the last literal. */ + if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */ + /* If this is the start of a new run, zero out counter */ + if(!runPos) { + runPos = 1; + t = 0; + } + /* Neat trick that saves 1 symbol: instead of or-ing 0 or 1 at + each bit position, add 1 or 2 instead. For example, + 1011 is 1<<0 + 1<<1 + 2<<2. 1010 is 2<<0 + 2<<1 + 1<<2. + You can make any bit pattern that way using 1 less symbol than + the basic or 0/1 method (except all bits 0, which would use no + symbols, but a run of length 0 doesn't mean anything in this + context). Thus space is saved. */ + t += (runPos << nextSym); /* +runPos if RUNA; +2*runPos if RUNB */ + runPos <<= 1; + continue; + } + /* When we hit the first non-run symbol after a run, we now know + how many times to repeat the last literal, so append that many + copies to our buffer of decoded symbols (dbuf) now. (The last + literal used is the one at the head of the mtfSymbol array.) */ + if(runPos) { + runPos=0; + if(dbufCount+t>=dbufSize) return RETVAL_DATA_ERROR; + + uc = symToByte[mtfSymbol[0]]; + byteCount[uc] += t; + while(t--) dbuf[dbufCount++]=uc; + } + /* Is this the terminating symbol? */ + if(nextSym>symTotal) break; + /* At this point, nextSym indicates a new literal character. Subtract + one to get the position in the MTF array at which this literal is + currently to be found. (Note that the result can't be -1 or 0, + because 0 and 1 are RUNA and RUNB. But another instance of the + first symbol in the mtf array, position 0, would have been handled + as part of a run above. Therefore 1 unused mtf position minus + 2 non-literal nextSym values equals -1.) */ + if(dbufCount>=dbufSize) return RETVAL_DATA_ERROR; + i = nextSym - 1; + uc = mtfSymbol[i]; + /* Adjust the MTF array. Since we typically expect to move only a + * small number of symbols, and are bound by 256 in any case, using + * memmove here would typically be bigger and slower due to function + * call overhead and other assorted setup costs. */ + do { + mtfSymbol[i] = mtfSymbol[i-1]; + } while (--i); + mtfSymbol[0] = uc; + uc=symToByte[uc]; + /* We have our literal byte. Save it into dbuf. 
*/ + byteCount[uc]++; + dbuf[dbufCount++] = (unsigned int)uc; + } + /* At this point, we've read all the Huffman-coded symbols (and repeated + runs) for this block from the input stream, and decoded them into the + intermediate buffer. There are dbufCount many decoded bytes in dbuf[]. + Now undo the Burrows-Wheeler transform on dbuf. + See http://dogma.net/markn/articles/bwt/bwt.htm + */ + /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */ + j=0; + for(i=0;i<256;i++) { + k=j+byteCount[i]; + byteCount[i] = j; + j=k; + } + /* Figure out what order dbuf would be in if we sorted it. */ + for (i=0;i=dbufCount) return RETVAL_DATA_ERROR; + bd->writePos=dbuf[origPtr]; + bd->writeCurrent=(unsigned char)(bd->writePos&0xff); + bd->writePos>>=8; + bd->writeRunCountdown=5; + } + bd->writeCount=dbufCount; + + return RETVAL_OK; +} + +/* Undo burrows-wheeler transform on intermediate buffer to produce output. + If start_bunzip was initialized with out_fd=-1, then up to len bytes of + data are written to outbuf. Return value is number of bytes written or + error (all errors are negative numbers). If out_fd!=-1, outbuf and len + are ignored, data is written to out_fd and return is RETVAL_OK or error. +*/ + +static int INIT read_bunzip(bunzip_data *bd, char *outbuf, int len) +{ + const unsigned int *dbuf; + int pos,xcurrent,previous,gotcount; + + /* If last read was short due to end of file, return last block now */ + if(bd->writeCount<0) return bd->writeCount; + + gotcount = 0; + dbuf=bd->dbuf; + pos=bd->writePos; + xcurrent=bd->writeCurrent; + + /* We will always have pending decoded data to write into the output + buffer unless this is the very first call (in which case we haven't + Huffman-decoded a block into the intermediate buffer yet). */ + + if (bd->writeCopies) { + /* Inside the loop, writeCopies means extra copies (beyond 1) */ + --bd->writeCopies; + /* Loop outputting bytes */ + for(;;) { + /* If the output buffer is full, snapshot state and return */ + if(gotcount >= len) { + bd->writePos=pos; + bd->writeCurrent=xcurrent; + bd->writeCopies++; + return len; + } + /* Write next byte into output buffer, updating CRC */ + outbuf[gotcount++] = xcurrent; + bd->writeCRC=(((bd->writeCRC)<<8) + ^bd->crc32Table[((bd->writeCRC)>>24)^xcurrent]); + /* Loop now if we're outputting multiple copies of this byte */ + if (bd->writeCopies) { + --bd->writeCopies; + continue; + } +decode_next_byte: + if (!bd->writeCount--) break; + /* Follow sequence vector to undo Burrows-Wheeler transform */ + previous=xcurrent; + pos=dbuf[pos]; + xcurrent=pos&0xff; + pos>>=8; + /* After 3 consecutive copies of the same byte, the 4th is a repeat + count. We count down from 4 instead + * of counting up because testing for non-zero is faster */ + if(--bd->writeRunCountdown) { + if(xcurrent!=previous) bd->writeRunCountdown=4; + } else { + /* We have a repeated run, this byte indicates the count */ + bd->writeCopies=xcurrent; + xcurrent=previous; + bd->writeRunCountdown=5; + /* Sometimes there are just 3 bytes (run length 0) */ + if(!bd->writeCopies) goto decode_next_byte; + /* Subtract the 1 copy we'd output anyway to get extras */ + --bd->writeCopies; + } + } + /* Decompression of this block completed successfully */ + bd->writeCRC=~bd->writeCRC; + bd->totalCRC=((bd->totalCRC<<1) | (bd->totalCRC>>31)) ^ bd->writeCRC; + /* If this block had a CRC error, force file level CRC error. 
*/ + if(bd->writeCRC!=bd->headerCRC) { + bd->totalCRC=bd->headerCRC+1; + return RETVAL_LAST_BLOCK; + } + } + + /* Refill the intermediate buffer by Huffman-decoding next block of input */ + /* (previous is just a convenient unused temp variable here) */ + previous=get_next_block(bd); + if(previous) { + bd->writeCount=previous; + return (previous!=RETVAL_LAST_BLOCK) ? previous : gotcount; + } + bd->writeCRC=0xffffffffUL; + pos=bd->writePos; + xcurrent=bd->writeCurrent; + goto decode_next_byte; +} + +static int INIT nofill(void *buf,unsigned int len) { + return -1; +} + +/* Allocate the structure, read file header. If in_fd==-1, inbuf must contain + a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are + ignored, and data is read from file handle into temporary buffer. */ +static int INIT start_bunzip(bunzip_data **bdp, void *inbuf, int len, + int (*fill)(void*,unsigned int)) +{ + bunzip_data *bd; + unsigned int i,j,c; + const unsigned int BZh0=(((unsigned int)'B')<<24)+(((unsigned int)'Z')<<16) + +(((unsigned int)'h')<<8)+(unsigned int)'0'; + + /* Figure out how much data to allocate */ + i=sizeof(bunzip_data); + + /* Allocate bunzip_data. Most fields initialize to zero. */ + bd=*bdp=malloc(i); + memset(bd,0,sizeof(bunzip_data)); + /* Setup input buffer */ + bd->inbuf=inbuf; + bd->inbufCount=len; + if(fill != NULL) + bd->fill=fill; + else + bd->fill=nofill; + + /* Init the CRC32 table (big endian) */ + for(i=0;i<256;i++) { + c=i<<24; + for(j=8;j;j--) + c=c&0x80000000 ? (c<<1)^0x04c11db7 : (c<<1); + bd->crc32Table[i]=c; + } + + /* Ensure that file starts with "BZh['1'-'9']." */ + i = get_bits(bd,32); + if (((unsigned int)(i-BZh0-1)) >= 9) return RETVAL_NOT_BZIP_DATA; + + /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of + uncompressed data. Allocate intermediate buffer for block. */ + bd->dbufSize=100000*(i-BZh0); + + bd->dbuf=large_malloc(bd->dbufSize * sizeof(int)); + return RETVAL_OK; +} + +/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip data, + not end of file.) */ +STATIC int INIT bunzip2(char *inbuf, int len, + int(*fill)(void*,unsigned int), + int(*flush)(void*,unsigned int), + int *pos) +{ + char *outbuf; + bunzip_data *bd; + int i; + + outbuf=malloc(BZIP2_IOBUF_SIZE); + if(!(i=start_bunzip(&bd,inbuf,len,fill))) { + for(;;) { + if((i=read_bunzip(bd,outbuf,BZIP2_IOBUF_SIZE)) <= 0) break; + if(i!=flush(outbuf,i)) { + i=RETVAL_UNEXPECTED_OUTPUT_EOF; + break; + } + } + } + /* Check CRC and release memory */ + if(i==RETVAL_LAST_BLOCK) { + if (bd->headerCRC!=bd->totalCRC) { + error("Data integrity error when decompressing."); + } else { + i=RETVAL_OK; + } + } + else if (i==RETVAL_UNEXPECTED_OUTPUT_EOF) { + error("Compressed file ends unexpectedly"); + } + if(bd->dbuf) large_free(bd->dbuf); + if(pos) + *pos = bd->inbufPos; + free(bd); + free(outbuf); + + return i; +} + diff -urN linux-2.6.23.12/lib/decompress_unlzma.c linux-2.6.23.12udpcast/lib/decompress_unlzma.c --- linux-2.6.23.12/lib/decompress_unlzma.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-2.6.23.12udpcast/lib/decompress_unlzma.c 2007-12-27 12:52:37.000000000 +0000 @@ -0,0 +1,612 @@ +/* Lzma decompressor for Linux kernel. Shamelessly snarfed + * from busybox 1.1.1 + * + * Linux kernel adaptation + * Copyright (C) 2006 Alain + * + * Based on small lzma deflate implementation/Small range coder + * implementation for lzma. 
+ * Copyright (C) 2006 Aurelien Jacobs + * + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) + * Copyright (C) 1999-2005 Igor Pavlov + * + * Copyrights of the parts, see headers below. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef STATIC + +#include +#include +#include + +#ifdef TEST +#include "test.h" +#else +#include +#endif + +static void __init *large_malloc(size_t size) +{ + return vmalloc(size); +} + +static void __init large_free(void *where) +{ + vfree(where); +} + +#ifndef TEST + +#if 0 +static void __init *malloc(size_t size) +{ + return kmalloc(size, GFP_KERNEL); +} +#endif + +static void __init free(void *where) +{ + kfree(where); +} + +static void __init error(char *x) +{ + printk(KERN_ERR "%s\n", x); +} + +#endif + +#define STATIC /**/ + +#endif + +#ifndef INIT +#define INIT +#endif + +#include + +#define MIN(a,b) (((a)<(b))?(a):(b)) + +static long long INIT read_int(unsigned char *ptr, int size) +{ + int i; + long long ret=0; + + for(i=0; i + * + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) + * Copyright (c) 1999-2005 Igor Pavlov + */ + +#ifndef always_inline +# if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >0) +# define always_inline __attribute__((always_inline)) inline +# else +# define always_inline inline +# endif +#endif + +#ifdef CONFIG_FEATURE_LZMA_FAST +# define speed_inline always_inline +#else +# define speed_inline +#endif + + +typedef struct { + int (*fill)(void*,unsigned int); + uint8_t *ptr; + uint8_t *buffer; + uint8_t *buffer_end; + int buffer_size; + uint32_t code; + uint32_t range; + uint32_t bound; +} rc_t; + + +#define RC_TOP_BITS 24 +#define RC_MOVE_BITS 5 +#define RC_MODEL_TOTAL_BITS 11 + + +/* Called twice: once at startup and once in rc_normalize() */ +static void INIT rc_read(rc_t * rc) +{ + rc->buffer_size = rc->fill((char*)rc->buffer, LZMA_IOBUF_SIZE); + if (rc->buffer_size <= 0) + error("unexpected EOF"); + rc->ptr = rc->buffer; + rc->buffer_end = rc->buffer + rc->buffer_size; +} + +/* Called once */ +static always_inline void INIT rc_init(rc_t * rc, int (*fill)(void*,unsigned int), + char *buffer, int buffer_size) +{ + rc->fill = fill; + rc->buffer = (uint8_t *)buffer; + rc->buffer_size = buffer_size; + rc->buffer_end = rc->buffer + rc->buffer_size; + rc->ptr = rc->buffer; + + rc->code = 0; + rc->range = 0xFFFFFFFF; +} + +static always_inline void INIT rc_init_code(rc_t * rc) +{ + int i; + + for (i = 0; i < 5; i++) { + if (rc->ptr >= rc->buffer_end) + rc_read(rc); + rc->code = (rc->code << 8) | *rc->ptr++; + } +} + + +/* Called once. 
TODO: bb_maybe_free() */ +static always_inline void INIT rc_free(rc_t * rc) +{ + free(rc->buffer); +} + +/* Called twice, but one callsite is in speed_inline'd rc_is_bit_0_helper() */ +static void INIT rc_do_normalize(rc_t * rc) +{ + if (rc->ptr >= rc->buffer_end) + rc_read(rc); + rc->range <<= 8; + rc->code = (rc->code << 8) | *rc->ptr++; +} +static always_inline void INIT rc_normalize(rc_t * rc) +{ + if (rc->range < (1 << RC_TOP_BITS)) { + rc_do_normalize(rc); + } +} + +/* Called 9 times */ +/* Why rc_is_bit_0_helper exists? + * Because we want to always expose (rc->code < rc->bound) to optimizer + */ +static speed_inline uint32_t INIT rc_is_bit_0_helper(rc_t * rc, uint16_t * p) +{ + rc_normalize(rc); + rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS); + return rc->bound; +} +static always_inline int INIT rc_is_bit_0(rc_t * rc, uint16_t * p) +{ + uint32_t t = rc_is_bit_0_helper(rc, p); + return rc->code < t; +} + +/* Called ~10 times, but very small, thus inlined */ +static speed_inline void INIT rc_update_bit_0(rc_t * rc, uint16_t * p) +{ + rc->range = rc->bound; + *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS; +} +static speed_inline void rc_update_bit_1(rc_t * rc, uint16_t * p) +{ + rc->range -= rc->bound; + rc->code -= rc->bound; + *p -= *p >> RC_MOVE_BITS; +} + +/* Called 4 times in unlzma loop */ +static int INIT rc_get_bit(rc_t * rc, uint16_t * p, int *symbol) +{ + if (rc_is_bit_0(rc, p)) { + rc_update_bit_0(rc, p); + *symbol *= 2; + return 0; + } else { + rc_update_bit_1(rc, p); + *symbol = *symbol * 2 + 1; + return 1; + } +} + +/* Called once */ +static always_inline int INIT rc_direct_bit(rc_t * rc) +{ + rc_normalize(rc); + rc->range >>= 1; + if (rc->code >= rc->range) { + rc->code -= rc->range; + return 1; + } + return 0; +} + +/* Called twice */ +static speed_inline void INIT +rc_bit_tree_decode(rc_t * rc, uint16_t * p, int num_levels, int *symbol) +{ + int i = num_levels; + + *symbol = 1; + while (i--) + rc_get_bit(rc, p + *symbol, symbol); + *symbol -= 1 << num_levels; +} + + +/* + * Small lzma deflate implementation. 
+ * Copyright (C) 2006 Aurelien Jacobs + * + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) + * Copyright (C) 1999-2005 Igor Pavlov + */ + + +typedef struct { + uint8_t pos; + uint32_t dict_size; + uint64_t dst_size; +} __attribute__ ((packed)) lzma_header_t; + + +#define LZMA_BASE_SIZE 1846 +#define LZMA_LIT_SIZE 768 + +#define LZMA_NUM_POS_BITS_MAX 4 + +#define LZMA_LEN_NUM_LOW_BITS 3 +#define LZMA_LEN_NUM_MID_BITS 3 +#define LZMA_LEN_NUM_HIGH_BITS 8 + +#define LZMA_LEN_CHOICE 0 +#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1) +#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1) +#define LZMA_LEN_MID (LZMA_LEN_LOW \ + + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS))) +#define LZMA_LEN_HIGH (LZMA_LEN_MID \ + +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS))) +#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS)) + +#define LZMA_NUM_STATES 12 +#define LZMA_NUM_LIT_STATES 7 + +#define LZMA_START_POS_MODEL_INDEX 4 +#define LZMA_END_POS_MODEL_INDEX 14 +#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1)) + +#define LZMA_NUM_POS_SLOT_BITS 6 +#define LZMA_NUM_LEN_TO_POS_STATES 4 + +#define LZMA_NUM_ALIGN_BITS 4 + +#define LZMA_MATCH_MIN_LEN 2 + +#define LZMA_IS_MATCH 0 +#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES <= rc.buffer_end) + rc_read(&rc); + ((unsigned char *)&header)[i] = *rc.ptr++; + } + + if (header.pos >= (9 * 5 * 5)) + error("bad header"); + + mi = header.pos / 9; + lc = header.pos % 9; + pb = mi / 5; + lp = mi % 5; + pos_state_mask = (1 << pb) - 1; + literal_pos_mask = (1 << lp) - 1; + + ENDIAN_CONVERT(header.dict_size); + ENDIAN_CONVERT(header.dst_size); + + if (header.dict_size == 0) + header.dict_size = 1; + + bufsize = MIN(header.dst_size, header.dict_size); + buffer = large_malloc(bufsize); + if(buffer == NULL) + return -1; + + num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)); + p = large_malloc(num_probs * sizeof(*p)); + num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp)); + for (i = 0; i < num_probs; i++) + p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1; + + rc_init_code(&rc); + + while (global_pos + buffer_pos < header.dst_size) { + int pos_state = (buffer_pos + global_pos) & pos_state_mask; + + prob = + p + LZMA_IS_MATCH + (state << LZMA_NUM_POS_BITS_MAX) + pos_state; + if (rc_is_bit_0(&rc, prob)) { + mi = 1; + rc_update_bit_0(&rc, prob); + prob = (p + LZMA_LITERAL + (LZMA_LIT_SIZE + * ((((buffer_pos + global_pos) & literal_pos_mask) << lc) + + (previous_byte >> (8 - lc))))); + + if (state >= LZMA_NUM_LIT_STATES) { + int match_byte; + + pos = buffer_pos - rep0; + while (pos >= header.dict_size) + pos += header.dict_size; + if(pos >= bufsize) { + goto fail; + } + match_byte = buffer[pos]; + do { + int bit; + + match_byte <<= 1; + bit = match_byte & 0x100; + prob_lit = prob + 0x100 + bit + mi; + if (rc_get_bit(&rc, prob_lit, &mi)) { + if (!bit) + break; + } else { + if (bit) + break; + } + } while (mi < 0x100); + } + while (mi < 0x100) { + prob_lit = prob + mi; + rc_get_bit(&rc, prob_lit, &mi); + } + previous_byte = (uint8_t) mi; + + buffer[buffer_pos++] = previous_byte; + if (buffer_pos == header.dict_size) { + buffer_pos = 0; + global_pos += header.dict_size; + flush(buffer, header.dict_size); + } + if (state < 4) + state = 0; + else if (state < 10) + state -= 3; + else + state -= 6; + } else { + int offset; + uint16_t *prob_len; + + rc_update_bit_1(&rc, prob); + prob = p + LZMA_IS_REP + state; + if (rc_is_bit_0(&rc, prob)) { + rc_update_bit_0(&rc, prob); + rep3 = rep2; + rep2 = rep1; + 
rep1 = rep0; + state = state < LZMA_NUM_LIT_STATES ? 0 : 3; + prob = p + LZMA_LEN_CODER; + } else { + rc_update_bit_1(&rc, prob); + prob = p + LZMA_IS_REP_G0 + state; + if (rc_is_bit_0(&rc, prob)) { + rc_update_bit_0(&rc, prob); + prob = (p + LZMA_IS_REP_0_LONG + + (state << LZMA_NUM_POS_BITS_MAX) + pos_state); + if (rc_is_bit_0(&rc, prob)) { + rc_update_bit_0(&rc, prob); + + state = state < LZMA_NUM_LIT_STATES ? 9 : 11; + pos = buffer_pos - rep0; + while (pos >= header.dict_size) + pos += header.dict_size; + if(pos >= bufsize) { + goto fail; + } + previous_byte = buffer[pos]; + buffer[buffer_pos++] = previous_byte; + if (buffer_pos == header.dict_size) { + buffer_pos = 0; + global_pos += header.dict_size; + flush((char*)buffer, header.dict_size); + } + continue; + } else { + rc_update_bit_1(&rc, prob); + } + } else { + uint32_t distance; + + rc_update_bit_1(&rc, prob); + prob = p + LZMA_IS_REP_G1 + state; + if (rc_is_bit_0(&rc, prob)) { + rc_update_bit_0(&rc, prob); + distance = rep1; + } else { + rc_update_bit_1(&rc, prob); + prob = p + LZMA_IS_REP_G2 + state; + if (rc_is_bit_0(&rc, prob)) { + rc_update_bit_0(&rc, prob); + distance = rep2; + } else { + rc_update_bit_1(&rc, prob); + distance = rep3; + rep3 = rep2; + } + rep2 = rep1; + } + rep1 = rep0; + rep0 = distance; + } + state = state < LZMA_NUM_LIT_STATES ? 8 : 11; + prob = p + LZMA_REP_LEN_CODER; + } + + prob_len = prob + LZMA_LEN_CHOICE; + if (rc_is_bit_0(&rc, prob_len)) { + rc_update_bit_0(&rc, prob_len); + prob_len = (prob + LZMA_LEN_LOW + + (pos_state << LZMA_LEN_NUM_LOW_BITS)); + offset = 0; + num_bits = LZMA_LEN_NUM_LOW_BITS; + } else { + rc_update_bit_1(&rc, prob_len); + prob_len = prob + LZMA_LEN_CHOICE_2; + if (rc_is_bit_0(&rc, prob_len)) { + rc_update_bit_0(&rc, prob_len); + prob_len = (prob + LZMA_LEN_MID + + (pos_state << LZMA_LEN_NUM_MID_BITS)); + offset = 1 << LZMA_LEN_NUM_LOW_BITS; + num_bits = LZMA_LEN_NUM_MID_BITS; + } else { + rc_update_bit_1(&rc, prob_len); + prob_len = prob + LZMA_LEN_HIGH; + offset = ((1 << LZMA_LEN_NUM_LOW_BITS) + + (1 << LZMA_LEN_NUM_MID_BITS)); + num_bits = LZMA_LEN_NUM_HIGH_BITS; + } + } + rc_bit_tree_decode(&rc, prob_len, num_bits, &len); + len += offset; + + if (state < 4) { + int pos_slot; + + state += LZMA_NUM_LIT_STATES; + prob = + p + LZMA_POS_SLOT + + ((len < + LZMA_NUM_LEN_TO_POS_STATES ? 
len : + LZMA_NUM_LEN_TO_POS_STATES - 1) + << LZMA_NUM_POS_SLOT_BITS); + rc_bit_tree_decode(&rc, prob, LZMA_NUM_POS_SLOT_BITS, + &pos_slot); + if (pos_slot >= LZMA_START_POS_MODEL_INDEX) { + num_bits = (pos_slot >> 1) - 1; + rep0 = 2 | (pos_slot & 1); + if (pos_slot < LZMA_END_POS_MODEL_INDEX) { + rep0 <<= num_bits; + prob = p + LZMA_SPEC_POS + rep0 - pos_slot - 1; + } else { + num_bits -= LZMA_NUM_ALIGN_BITS; + while (num_bits--) + rep0 = (rep0 << 1) | rc_direct_bit(&rc); + prob = p + LZMA_ALIGN; + rep0 <<= LZMA_NUM_ALIGN_BITS; + num_bits = LZMA_NUM_ALIGN_BITS; + } + i = 1; + mi = 1; + while (num_bits--) { + if (rc_get_bit(&rc, prob + mi, &mi)) + rep0 |= i; + i <<= 1; + } + } else + rep0 = pos_slot; + if (++rep0 == 0) + break; + } + + len += LZMA_MATCH_MIN_LEN; + + do { + pos = buffer_pos - rep0; + while (pos >= header.dict_size) + pos += header.dict_size; + if(pos >= bufsize) { + goto fail; + } + previous_byte = buffer[pos]; + buffer[buffer_pos++] = previous_byte; + if (buffer_pos == header.dict_size) { + buffer_pos = 0; + global_pos += header.dict_size; + flush((char*)buffer, header.dict_size); + } + len--; + } while (len != 0 && buffer_pos < header.dst_size); + } + } + + flush(buffer, buffer_pos); + if(posp) { + *posp = rc.ptr-rc.buffer; + } + large_free(buffer); + return 0; + fail: + large_free(buffer); + return -1; +} diff -urN linux-2.6.23.12/scripts/Makefile.lib linux-2.6.23.12udpcast/scripts/Makefile.lib --- linux-2.6.23.12/scripts/Makefile.lib 2007-12-18 21:55:57.000000000 +0000 +++ linux-2.6.23.12udpcast/scripts/Makefile.lib 2007-12-27 12:58:52.000000000 +0000 @@ -162,4 +162,17 @@ quiet_cmd_gzip = GZIP $@ cmd_gzip = gzip -f -9 < $< > $@ +# Append size +size_append=perl -e 'print(pack("i",(stat($$ARGV[0]))[7]));' +# Bzip2 +# --------------------------------------------------------------------------- + +quiet_cmd_bzip2 = BZIP2 $@ +cmd_bzip2 = (bzip2 -9 < $< ; $(size_append) $<) > $@ + +# Lzma +# --------------------------------------------------------------------------- + +quiet_cmd_lzma = LZMA $@ +cmd_lzma = (lzma --format=alone -9 -c $< ; $(size_append) $<) >$@
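
For completeness, the `lzma --format=alone` invocation above produces a 13-byte header whose layout matches the packed lzma_header_t used by unlzma earlier in this patch: one properties byte encoding (pb*5 + lp)*9 + lc, then a 32-bit dictionary size and a 64-bit uncompressed size, both little-endian in the file. A host-side parser sketch (struct and function names are mine, for illustration only):

#include <stdint.h>

struct lzma_props {
	int lc, lp, pb;
	uint32_t dict_size;
	uint64_t dst_size;
};

/* Decode the 13-byte "lzma alone" header at `h`; returns -1 on a bad
 * properties byte, mirroring the `pos >= (9 * 5 * 5)` check in unlzma. */
static int parse_lzma_alone(const unsigned char *h, struct lzma_props *p)
{
	unsigned int pos = h[0];
	int i;

	if (pos >= 9 * 5 * 5)
		return -1;		/* invalid properties byte */
	p->lc = pos % 9;
	pos /= 9;
	p->lp = pos % 5;
	p->pb = pos / 5;
	p->dict_size = 0;
	p->dst_size = 0;
	for (i = 0; i < 4; i++)
		p->dict_size |= (uint32_t)h[1 + i] << (8 * i);
	for (i = 0; i < 8; i++)
		p->dst_size |= (uint64_t)h[5 + i] << (8 * i);
	return 0;
}
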