diff -Naur lighttpd-1.4.10.orig/src/Makefile.am lighttpd-1.4.10/src/Makefile.am --- lighttpd-1.4.10.orig/src/Makefile.am 2006-01-04 06:08:03.000000000 -0800 +++ lighttpd-1.4.10/src/Makefile.am 2006-02-16 22:25:16.504347041 -0800 @@ -28,7 +28,7 @@ mod_ssi_expr.c: mod_ssi_exprparser.h common_src=buffer.c log.c \ - keyvalue.c chunk.c \ + keyvalue.c chunk.c chunk_encode.c \ http_chunk.c stream.c fdevent.c \ stat_cache.c plugin.c joblist.c etag.c array.c \ data_string.c data_count.c data_array.c \ @@ -222,9 +222,14 @@ mod_accesslog_la_LDFLAGS = -module -export-dynamic -avoid-version -no-undefined mod_accesslog_la_LIBADD = $(common_libadd) +lib_LTLIBRARIES += mod_deflate.la +mod_deflate_la_SOURCES = mod_deflate.c +mod_deflate_la_LDFLAGS = -module -export-dynamic -avoid-version -no-undefined +mod_deflate_la_LIBADD = $(Z_LIB) $(BZ_LIB) $(common_libadd) + hdr = server.h buffer.h network.h log.h keyvalue.h \ - response.h request.h fastcgi.h chunk.h \ + response.h request.h fastcgi.h chunk.h chunk_encode.h \ settings.h http_chunk.h http_auth_digest.h \ md5.h http_auth.h stream.h \ fdevent.h connections.h base.h stat_cache.h \ diff -Naur lighttpd-1.4.10.orig/src/base.h lighttpd-1.4.10/src/base.h --- lighttpd-1.4.10.orig/src/base.h 2006-01-11 06:51:04.000000000 -0800 +++ lighttpd-1.4.10/src/base.h 2006-02-16 22:25:16.504347041 -0800 @@ -345,8 +345,10 @@ int file_started; int file_finished; + int end_chunk; /* used for chunked transfer encoding. 
*/ - chunkqueue *write_queue; /* a large queue for low-level write ( HTTP response ) [ file, mem ] */ + chunkqueue *write_queue; /* a large queue for HTTP response content [ file, mem ] */ + chunkqueue *output_queue; /* a large queue for low-level write ( HTTP response ) [ file, mem ] */ chunkqueue *read_queue; /* a small queue for low-level read ( HTTP request ) [ mem ] */ chunkqueue *request_content_queue; /* takes request-content into tempfile if necessary [ tempfile, mem ]*/ @@ -570,6 +572,7 @@ connections *conns; connections *joblist; + connections *joblist_prev; connections *fdwaitqueue; stat_cache *stat_cache; diff -Naur lighttpd-1.4.10.orig/src/chunk.c lighttpd-1.4.10/src/chunk.c --- lighttpd-1.4.10.orig/src/chunk.c 2005-11-18 05:18:19.000000000 -0800 +++ lighttpd-1.4.10/src/chunk.c 2006-02-16 22:25:16.505346873 -0800 @@ -224,6 +224,16 @@ return 0; } +int chunkqueue_append_chunkqueue(chunkqueue *cq, chunkqueue *src) { + if(src == NULL) return 0; + chunkqueue_append_chunk(cq, src->first); + cq->last = src->last; + src->first = NULL; + src->last = NULL; + + return 0; +} + buffer * chunkqueue_get_prepend_buffer(chunkqueue *cq) { chunk *c; diff -Naur lighttpd-1.4.10.orig/src/chunk.h lighttpd-1.4.10/src/chunk.h --- lighttpd-1.4.10.orig/src/chunk.h 2005-10-31 23:32:21.000000000 -0800 +++ lighttpd-1.4.10/src/chunk.h 2006-02-16 22:25:16.505346873 -0800 @@ -51,6 +51,7 @@ int chunkqueue_append_file(chunkqueue *c, buffer *fn, off_t offset, off_t len); int chunkqueue_append_mem(chunkqueue *c, const char *mem, size_t len); int chunkqueue_append_buffer(chunkqueue *c, buffer *mem); +int chunkqueue_append_chunkqueue(chunkqueue *cq, chunkqueue *src); int chunkqueue_prepend_buffer(chunkqueue *c, buffer *mem); buffer * chunkqueue_get_append_buffer(chunkqueue *c); diff -Naur lighttpd-1.4.10.orig/src/chunk_encode.c lighttpd-1.4.10/src/chunk_encode.c --- lighttpd-1.4.10.orig/src/chunk_encode.c 1969-12-31 16:00:00.000000000 -0800 +++ lighttpd-1.4.10/src/chunk_encode.c 2006-02-16 
22:25:16.505346873 -0800 @@ -0,0 +1,117 @@ +/** + * the HTTP chunk-API + * + * + */ + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "server.h" +#include "chunk.h" +#include "chunk_encode.h" +#include "log.h" + +static int chunk_encode_append_len(chunkqueue *cq, size_t len) { + size_t i, olen = len, j; + buffer *b; + + /*b = srv->tmp_chunk_len;*/ + /*b = buffer_init();*/ + b = chunkqueue_get_append_buffer(cq); + + if (len == 0) { + buffer_copy_string(b, "0"); + } else { + for (i = 0; i < 8 && len; i++) { + len >>= 4; + } + + /* i is the number of hex digits we have */ + buffer_prepare_copy(b, i + 1); + + for (j = i-1, len = olen; j+1 > 0; j--) { + b->ptr[j] = (len & 0xf) + (((len & 0xf) <= 9) ? '0' : 'a' - 10); + len >>= 4; + } + b->used = i; + b->ptr[b->used++] = '\0'; + } + + buffer_append_string(b, "\r\n"); + /* + chunkqueue_append_buffer(cq, b); + buffer_free(b); + */ + + return 0; +} + + +int chunk_encode_append_file(chunkqueue *cq, buffer *fn, off_t offset, off_t len) { + if (!cq) return -1; + if (len == 0) return 0; + + chunk_encode_append_len(cq, len); + + chunkqueue_append_file(cq, fn, offset, len); + + chunkqueue_append_mem(cq, "\r\n", 2 + 1); + + return 0; +} + +int chunk_encode_append_buffer(chunkqueue *cq, buffer *mem) { + if (!cq) return -1; + if (mem->used <= 1) return 0; + + chunk_encode_append_len(cq, mem->used - 1); + + chunkqueue_append_buffer(cq, mem); + + chunkqueue_append_mem(cq, "\r\n", 2 + 1); + + return 0; +} + +int chunk_encode_append_mem(chunkqueue *cq, const char * mem, size_t len) { + if (!cq) return -1; + if (len <= 1) return 0; + + chunk_encode_append_len(cq, len - 1); + + chunkqueue_append_mem(cq, mem, len); + + chunkqueue_append_mem(cq, "\r\n", 2 + 1); + + return 0; +} + +int chunk_encode_append_queue(chunkqueue *cq, chunkqueue *src) { + int len = chunkqueue_length(src); + if (!cq) return -1; + if (len == 0) return 0; + + chunk_encode_append_len(cq, len); + + 
chunkqueue_append_chunkqueue(cq, src); + + chunkqueue_append_mem(cq, "\r\n", 2 + 1); + + return 0; +} + +int chunk_encode_end(chunkqueue *cq) { + chunk_encode_append_len(cq, 0); + chunkqueue_append_mem(cq, "\r\n", 2 + 1); + return 0; +} + diff -Naur lighttpd-1.4.10.orig/src/chunk_encode.h lighttpd-1.4.10/src/chunk_encode.h --- lighttpd-1.4.10.orig/src/chunk_encode.h 1969-12-31 16:00:00.000000000 -0800 +++ lighttpd-1.4.10/src/chunk_encode.h 2006-02-16 22:25:16.506346704 -0800 @@ -0,0 +1,13 @@ +#ifndef _CHUNK_ENCODE_H_ +#define _CHUNK_ENCODE_H_ + +#include "server.h" +#include + +int chunk_encode_append_mem(chunkqueue *cq, const char * mem, size_t len); +int chunk_encode_append_buffer(chunkqueue *cq, buffer *mem); +int chunk_encode_append_file(chunkqueue *cq, buffer *fn, off_t offset, off_t len); +int chunk_encode_append_queue(chunkqueue *cq, chunkqueue *src); +int chunk_encode_end(chunkqueue *cq); + +#endif diff -Naur lighttpd-1.4.10.orig/src/connections.c lighttpd-1.4.10/src/connections.c --- lighttpd-1.4.10.orig/src/connections.c 2006-02-08 04:27:20.000000000 -0800 +++ lighttpd-1.4.10/src/connections.c 2006-02-16 22:25:16.507346536 -0800 @@ -18,6 +18,7 @@ #include "response.h" #include "network.h" #include "http_chunk.h" +#include "chunk_encode.h" #include "stat_cache.h" #include "joblist.h" @@ -146,6 +147,12 @@ return 0; } +int connection_queue_is_empty(connection *con) { + if(!chunkqueue_is_empty(con->write_queue)) return 0; + if(!chunkqueue_is_empty(con->output_queue)) return 0; + return 1; +} + #if 0 static void dump_packet(const unsigned char *data, size_t len) { size_t i, j; @@ -365,6 +372,7 @@ con->file_finished = 1; chunkqueue_reset(con->write_queue); + chunkqueue_reset(con->output_queue); } break; default: @@ -472,12 +480,27 @@ /* disable chunked encoding again as we have no body */ con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED; chunkqueue_reset(con->write_queue); + chunkqueue_reset(con->output_queue); con->file_finished = 1; break; } 
+ /* Allow filter plugins to change response headers before they are written. */ + switch(plugins_call_handle_response_start(srv, con)) { + case HANDLER_GO_ON: + case HANDLER_FINISHED: + /* response start is finished */ + break; + default: + /* something strange happend */ + log_error_write(srv, __FILE__, __LINE__, "s", "Filter plugin failed."); + connection_set_state(srv, con, CON_STATE_ERROR); + joblist_append(srv, con); + break; + } + if (con->file_finished) { /* we have all the content and chunked encoding is not used, set a content-length */ @@ -517,6 +540,7 @@ if (con->request.http_method == HTTP_METHOD_HEAD) { chunkqueue_reset(con->write_queue); + chunkqueue_reset(con->output_queue); } http_response_write_header(srv, con); @@ -525,11 +549,57 @@ } static int connection_handle_write(server *srv, connection *con) { - switch(network_write_chunkqueue(srv, con, con->write_queue)) { + int finished = 0; + int len; + + /* Allow filter plugins to modify response conent */ + switch(plugins_call_handle_response_filter(srv, con)) { + case HANDLER_GO_ON: + finished = con->file_finished; + /* response content not changed */ + break; + case HANDLER_COMEBACK: + /* response filter has more work */ + finished = 0; + break; + case HANDLER_FINISHED: + /* response filter is finished */ + finished = 1; + break; + default: + /* something strange happend */ + log_error_write(srv, __FILE__, __LINE__, "s", "Filter plugin failed."); + connection_set_state(srv, con, CON_STATE_ERROR); + joblist_append(srv, con); + finished = 1; + break; + } + + /* move chunks from write_queue to output_queue. 
*/ + if (con->request.http_method == HTTP_METHOD_HEAD) { + chunkqueue_reset(con->write_queue); + } else { + len = chunkqueue_length(con->write_queue); + if(con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) { + chunk_encode_append_queue(con->output_queue, con->write_queue); + if(finished && !con->end_chunk) { + con->end_chunk = 1; + chunk_encode_end(con->output_queue); + } + } else { + chunkqueue_append_chunkqueue(con->output_queue, con->write_queue); + } + con->write_queue->bytes_out += len; + } + /* write chunks from output_queue to network */ + switch(network_write_chunkqueue(srv, con, con->output_queue)) { case 0: - if (con->file_finished) { + if (finished) { connection_set_state(srv, con, CON_STATE_RESPONSE_END); joblist_append(srv, con); + } else { + /* not finished yet -> WRITE */ + con->is_writable = 1; } break; case -1: /* error on our side */ @@ -599,6 +669,7 @@ #undef CLEAN con->write_queue = chunkqueue_init(); + con->output_queue = chunkqueue_init(); con->read_queue = chunkqueue_init(); con->request_content_queue = chunkqueue_init(); chunkqueue_set_tempdirs(con->request_content_queue, srv->srvconf.upload_tempdirs); @@ -627,6 +698,7 @@ connection_reset(srv, con); chunkqueue_free(con->write_queue); + chunkqueue_free(con->output_queue); chunkqueue_free(con->read_queue); chunkqueue_free(con->request_content_queue); array_free(con->request.headers); @@ -681,6 +753,7 @@ con->http_status = 0; con->file_finished = 0; con->file_started = 0; + con->end_chunk = 0; con->got_response = 0; con->parsed_response = 0; @@ -751,6 +824,7 @@ array_reset(con->environment); chunkqueue_reset(con->write_queue); + chunkqueue_reset(con->output_queue); chunkqueue_reset(con->request_content_queue); /* the plugins should cleanup themself */ @@ -1178,7 +1252,6 @@ } if (con->state == CON_STATE_WRITE && - !chunkqueue_is_empty(con->write_queue) && con->is_writable) { if (-1 == connection_handle_write(srv, con)) { @@ -1573,15 +1646,15 @@ } /* only try to write if we have 
something in the queue */ - if (!chunkqueue_is_empty(con->write_queue)) { #if 0 + if (!connection_queue_is_empty(con)) { log_error_write(srv, __FILE__, __LINE__, "dsd", con->fd, "packets to write:", - con->write_queue->used); -#endif + con->output_queue->used); } - if (!chunkqueue_is_empty(con->write_queue) && con->is_writable) { +#endif + if (con->is_writable) { if (-1 == connection_handle_write(srv, con)) { log_error_write(srv, __FILE__, __LINE__, "ds", con->fd, @@ -1691,9 +1764,9 @@ * - if we have data to write * - if the socket is not writable yet */ - if (!chunkqueue_is_empty(con->write_queue) && - (con->is_writable == 0) && - (con->traffic_limit_reached == 0)) { + if ((con->is_writable == 0) && + (con->traffic_limit_reached == 0) && + !connection_queue_is_empty(con)) { fdevent_event_add(srv->ev, &(con->fde_ndx), con->fd, FDEVENT_OUT); } else { fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd); diff -Naur lighttpd-1.4.10.orig/src/http_chunk.c lighttpd-1.4.10/src/http_chunk.c --- lighttpd-1.4.10.orig/src/http_chunk.c 2005-08-10 15:26:50.000000000 -0700 +++ lighttpd-1.4.10/src/http_chunk.c 2006-02-16 22:25:16.508346367 -0800 @@ -58,16 +58,9 @@ cq = con->write_queue; - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) { - http_chunk_append_len(srv, con, len); - } - + chunkqueue_append_file(cq, fn, offset, len); - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED && len > 0) { - chunkqueue_append_mem(cq, "\r\n", 2 + 1); - } - return 0; } @@ -78,16 +71,9 @@ cq = con->write_queue; - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) { - http_chunk_append_len(srv, con, mem->used - 1); - } - + chunkqueue_append_buffer(cq, mem); - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED && mem->used > 0) { - chunkqueue_append_mem(cq, "\r\n", 2 + 1); - } - return 0; } @@ -99,25 +85,11 @@ cq = con->write_queue; if (len == 0) { - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) { 
- http_chunk_append_len(srv, con, 0); - chunkqueue_append_mem(cq, "\r\n", 2 + 1); - } else { - chunkqueue_append_mem(cq, "", 1); - } return 0; } - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) { - http_chunk_append_len(srv, con, len - 1); - } - chunkqueue_append_mem(cq, mem, len); - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) { - chunkqueue_append_mem(cq, "\r\n", 2 + 1); - } - return 0; } diff -Naur lighttpd-1.4.10.orig/src/joblist.c lighttpd-1.4.10/src/joblist.c --- lighttpd-1.4.10.orig/src/joblist.c 2005-08-10 15:26:41.000000000 -0700 +++ lighttpd-1.4.10/src/joblist.c 2006-02-16 22:25:16.508346367 -0800 @@ -7,6 +7,7 @@ int joblist_append(server *srv, connection *con) { if (con->in_joblist) return 0; + con->in_joblist = 1; if (srv->joblist->size == 0) { srv->joblist->size = 16; diff -Naur lighttpd-1.4.10.orig/src/mod_deflate.c lighttpd-1.4.10/src/mod_deflate.c --- lighttpd-1.4.10.orig/src/mod_deflate.c 1969-12-31 16:00:00.000000000 -0800 +++ lighttpd-1.4.10/src/mod_deflate.c 2006-02-17 23:26:45.885437687 -0800 @@ -0,0 +1,1291 @@ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "base.h" +#include "log.h" +#include "buffer.h" +#include "response.h" +#include "joblist.h" +#include "stat_cache.h" + +#include "plugin.h" + +#include "crc32.h" +#include "etag.h" + +#if defined HAVE_ZLIB_H && defined HAVE_LIBZ +# define USE_ZLIB +# include +#else +# define Z_DEFAULT_COMPRESSION 1 +#endif + +#if defined HAVE_BZLIB_H && defined HAVE_LIBBZ2 +# define USE_BZ2LIB +/* we don't need stdio interface */ +# define BZ_NO_STDIO +# include +#endif + +#include "sys-mmap.h" + +/* request: accept-encoding */ +#define HTTP_ACCEPT_ENCODING_IDENTITY BV(0) +#define HTTP_ACCEPT_ENCODING_GZIP BV(1) +#define HTTP_ACCEPT_ENCODING_DEFLATE BV(2) +#define HTTP_ACCEPT_ENCODING_COMPRESS BV(3) +#define HTTP_ACCEPT_ENCODING_BZIP2 BV(4) + +#define KByte * 1024 +#define MByte * 1024 KByte 
+#define GByte * 1024 MByte + +typedef struct { + unsigned short debug; + unsigned short enabled; + unsigned short bzip2; + unsigned short sync_flush; + unsigned short output_buffer_size; + unsigned short min_compress_size; + unsigned short work_block_size; + short mem_level; + short compression_level; + short window_size; + array *mimetypes; +} plugin_config; + +typedef struct { + PLUGIN_DATA; + buffer *tmp_buf; + + plugin_config **config_storage; + plugin_config conf; +} plugin_data; + +typedef struct { + int bytes_in; + int bytes_out; + chunkqueue *in_queue; + buffer *output; + /* compression type & state */ + int compression_type; + int stream_open; +#ifdef USE_ZLIB + unsigned long crc; + z_stream z; + unsigned short gzip_header; +#endif +#ifdef USE_BZ2LIB + bz_stream bz; +#endif + plugin_data *plugin_data; +} handler_ctx; + +static handler_ctx *handler_ctx_init() { + handler_ctx *hctx; + + hctx = calloc(1, sizeof(*hctx)); + hctx->in_queue = chunkqueue_init(); + + return hctx; +} + +static void handler_ctx_free(handler_ctx *hctx) { + chunkqueue_free(hctx->in_queue); + free(hctx); +} + +INIT_FUNC(mod_deflate_init) { + plugin_data *p; + + p = calloc(1, sizeof(*p)); + + p->tmp_buf = buffer_init(); + + return p; +} + +FREE_FUNC(mod_deflate_free) { + plugin_data *p = p_d; + + UNUSED(srv); + + if (!p) return HANDLER_GO_ON; + + if (p->config_storage) { + size_t i; + for (i = 0; i < srv->config_context->used; i++) { + plugin_config *s = p->config_storage[i]; + + if (!s) continue; + + array_free(s->mimetypes); + + free(s); + } + free(p->config_storage); + } + + buffer_free(p->tmp_buf); + + free(p); + + return HANDLER_GO_ON; +} + +SETDEFAULTS_FUNC(mod_deflate_setdefaults) { + plugin_data *p = p_d; + size_t i = 0; + + config_values_t cv[] = { + { "deflate.output-buffer-size", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION }, + { "deflate.mimetypes", NULL, T_CONFIG_ARRAY, T_CONFIG_SCOPE_CONNECTION }, + { "deflate.compression-level", NULL, T_CONFIG_SHORT, 
T_CONFIG_SCOPE_CONNECTION }, + { "deflate.mem-level", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION }, + { "deflate.window-size", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION }, + { "deflate.min-compress-size", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION }, + { "deflate.work-block-size", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION }, + { "deflate.enabled", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION }, + { "deflate.debug", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION }, + { "deflate.bzip2", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION }, + { "deflate.sync-flush", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION }, + { NULL, NULL, T_CONFIG_UNSET, T_CONFIG_SCOPE_UNSET } + }; + + p->config_storage = calloc(1, srv->config_context->used * sizeof(specific_config *)); + + for (i = 0; i < srv->config_context->used; i++) { + plugin_config *s; + + s = calloc(1, sizeof(plugin_config)); + s->enabled = 1; + s->bzip2 = 1; + s->sync_flush = 0; + s->debug = 0; + s->output_buffer_size = 0; + s->mem_level = 9; + s->window_size = 15; + s->min_compress_size = 0; + s->work_block_size = 2048; + s->compression_level = Z_DEFAULT_COMPRESSION; + s->mimetypes = array_init(); + + cv[0].destination = &(s->output_buffer_size); + cv[1].destination = s->mimetypes; + cv[2].destination = &(s->compression_level); + cv[3].destination = &(s->mem_level); + cv[4].destination = &(s->window_size); + cv[5].destination = &(s->min_compress_size); + cv[6].destination = &(s->work_block_size); + cv[7].destination = &(s->enabled); + cv[8].destination = &(s->debug); + cv[9].destination = &(s->bzip2); + cv[10].destination = &(s->sync_flush); + + p->config_storage[i] = s; + + if (0 != config_insert_values_global(srv, ((data_config *)srv->config_context->data[i])->value, cv)) { + return HANDLER_ERROR; + } + + if((s->compression_level < 1 || s->compression_level > 9) && + s->compression_level != Z_DEFAULT_COMPRESSION) { + log_error_write(srv, __FILE__, __LINE__, "sd", + 
"compression-level must be between 1 and 9:", s->compression_level); + return HANDLER_ERROR; + } + + if(s->mem_level < 1 || s->mem_level > 9) { + log_error_write(srv, __FILE__, __LINE__, "sd", + "mem-level must be between 1 and 9:", s->mem_level); + return HANDLER_ERROR; + } + + if(s->window_size < 1 || s->window_size > 15) { + log_error_write(srv, __FILE__, __LINE__, "sd", + "window-size must be between 1 and 15:", s->window_size); + return HANDLER_ERROR; + } + s->window_size = 0 - s->window_size; + + if(s->sync_flush) { + s->output_buffer_size = 0; + } + } + + return HANDLER_GO_ON; + +} + +#ifdef USE_ZLIB +/* Copied gzip_header from apache 2.2's mod_deflate.c */ +/* RFC 1952 Section 2.3 defines the gzip header: + * + * +---+---+---+---+---+---+---+---+---+---+ + * |ID1|ID2|CM |FLG| MTIME |XFL|OS | + * +---+---+---+---+---+---+---+---+---+---+ + */ +static const char gzip_header[10] = +{ '\037', '\213', Z_DEFLATED, 0, + 0, 0, 0, 0, /* mtime */ + 0, 0x03 /* Unix OS_CODE */ +}; +static int stream_deflate_init(server *srv, connection *con, handler_ctx *hctx) { + plugin_data *p = hctx->plugin_data; + z_stream *z; + + UNUSED(srv); + UNUSED(con); + + z = &(hctx->z); + z->zalloc = Z_NULL; + z->zfree = Z_NULL; + z->opaque = Z_NULL; + z->total_in = 0; + z->total_out = 0; + z->next_out = NULL; + z->avail_out = 0; + + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sd", + "output-buffer-size:", p->conf.output_buffer_size); + log_error_write(srv, __FILE__, __LINE__, "sd", + "compression-level:", p->conf.compression_level); + log_error_write(srv, __FILE__, __LINE__, "sd", + "mem-level:", p->conf.mem_level); + log_error_write(srv, __FILE__, __LINE__, "sd", + "window-size:", p->conf.window_size); + log_error_write(srv, __FILE__, __LINE__, "sd", + "min-compress-size:", p->conf.min_compress_size); + log_error_write(srv, __FILE__, __LINE__, "sd", + "work-block-size:", p->conf.work_block_size); + } + if (Z_OK != deflateInit2(z, + p->conf.compression_level, + 
Z_DEFLATED, + p->conf.window_size, /* supress zlib-header */ + p->conf.mem_level, + Z_DEFAULT_STRATEGY)) { + return -1; + } + hctx->stream_open = 1; + + return 0; +} + +static int stream_deflate_compress(server *srv, connection *con, handler_ctx *hctx, unsigned char *start, off_t st_size) { + plugin_data *p = hctx->plugin_data; + z_stream *z; + int len; + int in = 0, out = 0; + + UNUSED(srv); + z = &(hctx->z); + + if(z->next_out == NULL) { + z->next_out = (unsigned char *)hctx->output->ptr; + z->avail_out = hctx->output->size; + } + + if(hctx->compression_type == HTTP_ACCEPT_ENCODING_GZIP) { + if(hctx->gzip_header == 0) { + hctx->gzip_header = 1; + /* copy gzip header into output buffer */ + buffer_copy_memory(hctx->output, gzip_header, sizeof(gzip_header)); + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sd", + "gzip_header len=", sizeof(gzip_header)); + } + /* initialize crc32 */ + hctx->crc = crc32(0L, Z_NULL, 0); + z->next_out = (unsigned char *)(hctx->output->ptr + sizeof(gzip_header)); + z->avail_out = hctx->output->size - sizeof(gzip_header); + } + hctx->crc = crc32(hctx->crc, start, st_size); + } + + z->next_in = start; + z->avail_in = st_size; + hctx->bytes_in += st_size; + + /* compress data */ + in = z->avail_in; + do { + if (Z_OK != deflate(z, Z_NO_FLUSH)) { + deflateEnd(z); + hctx->stream_open = 0; + return -1; + } + + if(z->avail_out == 0 || z->avail_in > 0) { + len = hctx->output->size - z->avail_out; + hctx->bytes_out += len; + out += len; + chunkqueue_append_mem(con->write_queue, hctx->output->ptr, len+1); + z->next_out = (unsigned char *)hctx->output->ptr; + z->avail_out = hctx->output->size; + } + } while (z->avail_in > 0); + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sdsd", + "compress: in=", in, ", out=", out); + } + return 0; +} + +static int stream_deflate_flush(server *srv, connection *con, handler_ctx *hctx, int end) { + plugin_data *p = hctx->plugin_data; + z_stream *z; + int len; + int rc = 0; + int 
done; + int flush = 1; + int in = 0, out = 0; + + UNUSED(srv); + + z = &(hctx->z); + + if(z->next_out == NULL) { + z->next_out = (unsigned char *)hctx->output->ptr; + z->avail_out = hctx->output->size; + } + /* compress data */ + in = z->avail_in; + do { + done = 1; + if(end) { + rc = deflate(z, Z_FINISH); + if (rc == Z_OK) { + done = 0; + } else if (rc != Z_STREAM_END) { + deflateEnd(z); + hctx->stream_open = 0; + return -1; + } + } else { + if(p->conf.sync_flush) { + rc = deflate(z, Z_SYNC_FLUSH); + } else if(z->avail_in > 0) { + if(p->conf.output_buffer_size > 0) flush = 0; + rc = deflate(z, Z_NO_FLUSH); + } else { + if(p->conf.output_buffer_size > 0) flush = 0; + rc = Z_OK; + } + if (rc != Z_OK) { + deflateEnd(z); + hctx->stream_open = 0; + return -1; + } + } + + len = hctx->output->size - z->avail_out; + if(z->avail_out == 0 || (flush && len > 0)) { + hctx->bytes_out += len; + out += len; + chunkqueue_append_mem(con->write_queue, hctx->output->ptr, len+1); + z->next_out = (unsigned char *)hctx->output->ptr; + z->avail_out = hctx->output->size; + } + } while (z->avail_in != 0 || !done); + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sdsd", + "flush: in=", in, ", out=", out); + } + if(p->conf.sync_flush) { + z->next_out = NULL; + z->avail_out = 0; + } + return 0; +} + +static int stream_deflate_end(server *srv, connection *con, handler_ctx *hctx) { + plugin_data *p = hctx->plugin_data; + z_stream *z; + int rc; + + UNUSED(srv); + + z = &(hctx->z); + if(!hctx->stream_open) return 0; + hctx->stream_open = 0; + + if(hctx->compression_type == HTTP_ACCEPT_ENCODING_GZIP && hctx->bytes_out > 0 && + hctx->bytes_out >= sizeof(gzip_header)) { + /* write gzip footer */ + unsigned char c[8]; + + c[0] = (hctx->crc >> 0) & 0xff; + c[1] = (hctx->crc >> 8) & 0xff; + c[2] = (hctx->crc >> 16) & 0xff; + c[3] = (hctx->crc >> 24) & 0xff; + c[4] = (z->total_in >> 0) & 0xff; + c[5] = (z->total_in >> 8) & 0xff; + c[6] = (z->total_in >> 16) & 0xff; + c[7] = 
(z->total_in >> 24) & 0xff; + /* append footer to write_queue */ + chunkqueue_append_mem(con->write_queue, (char *)c, 9); + hctx->bytes_out += 8; + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sd", + "gzip_footer len=", 8); + } + } + + if ((rc = deflateEnd(z)) != Z_OK) { + if(rc == Z_DATA_ERROR) return 0; + if(z->msg != NULL) { + log_error_write(srv, __FILE__, __LINE__, "sdss", + "deflateEnd error ret=", rc, ", msg=", z->msg); + } else { + log_error_write(srv, __FILE__, __LINE__, "sd", + "deflateEnd error ret=", rc); + } + return -1; + } + return 0; +} + +#endif + +#ifdef USE_BZ2LIB +static int stream_bzip2_init(server *srv, connection *con, handler_ctx *hctx) { + plugin_data *p = hctx->plugin_data; + bz_stream *bz; + + UNUSED(srv); + UNUSED(con); + + bz = &(hctx->bz); + bz->bzalloc = NULL; + bz->bzfree = NULL; + bz->opaque = NULL; + bz->total_in_lo32 = 0; + bz->total_in_hi32 = 0; + bz->total_out_lo32 = 0; + bz->total_out_hi32 = 0; + + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sd", + "output-buffer-size:", p->conf.output_buffer_size); + log_error_write(srv, __FILE__, __LINE__, "sd", + "compression-level:", p->conf.compression_level); + log_error_write(srv, __FILE__, __LINE__, "sd", + "mem-level:", p->conf.mem_level); + log_error_write(srv, __FILE__, __LINE__, "sd", + "window-size:", p->conf.window_size); + log_error_write(srv, __FILE__, __LINE__, "sd", + "min-compress-size:", p->conf.min_compress_size); + log_error_write(srv, __FILE__, __LINE__, "sd", + "work-block-size:", p->conf.work_block_size); + } + if (BZ_OK != BZ2_bzCompressInit(bz, + p->conf.compression_level, /* blocksize = 900k */ + 0, /* no output */ + 30)) { /* workFactor: default */ + return -1; + } + hctx->stream_open = 1; + + return 0; +} + +static int stream_bzip2_compress(server *srv, connection *con, handler_ctx *hctx, unsigned char *start, off_t st_size) { + plugin_data *p = hctx->plugin_data; + bz_stream *bz; + int len; + int rc; + int in = 0, out = 0; + 
+ UNUSED(srv); + + bz = &(hctx->bz); + + if(bz->next_out == NULL) { + bz->next_out = hctx->output->ptr; + bz->avail_out = hctx->output->size; + } + + bz->next_in = (char *)start; + bz->avail_in = st_size; + hctx->bytes_in += st_size; + + /* compress data */ + in = bz->avail_in; + do { + rc = BZ2_bzCompress(bz, BZ_RUN); + if (rc != BZ_RUN_OK) { + BZ2_bzCompressEnd(bz); + hctx->stream_open = 0; + return -1; + } + + if(bz->avail_out == 0 || bz->avail_in > 0) { + len = hctx->output->size - bz->avail_out; + hctx->bytes_out += len; + out += len; + chunkqueue_append_mem(con->write_queue, hctx->output->ptr, len+1); + bz->next_out = hctx->output->ptr; + bz->avail_out = hctx->output->size; + } + } while (bz->avail_in > 0); + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sdsd", + "compress: in=", in, ", out=", out); + } + return 0; +} + +static int stream_bzip2_flush(server *srv, connection *con, handler_ctx *hctx, int end) { + plugin_data *p = hctx->plugin_data; + bz_stream *bz; + int len; + int rc; + int done; + int flush = 1; + int in = 0, out = 0; + + UNUSED(srv); + + bz = &(hctx->bz); + + if(bz->next_out == NULL) { + bz->next_out = hctx->output->ptr; + bz->avail_out = hctx->output->size; + } + /* compress data */ + in = bz->avail_in; + do { + done = 1; + if(end) { + rc = BZ2_bzCompress(bz, BZ_FINISH); + if (rc == BZ_FINISH_OK) { + done = 0; + } else if (rc != BZ_STREAM_END) { + BZ2_bzCompressEnd(bz); + hctx->stream_open = 0; + return -1; + } + } else if(bz->avail_in > 0) { + rc = BZ2_bzCompress(bz, BZ_RUN); + if (rc != BZ_RUN_OK) { + BZ2_bzCompressEnd(bz); + hctx->stream_open = 0; + return -1; + } + if(p->conf.output_buffer_size > 0) flush = 0; + } + + len = hctx->output->size - bz->avail_out; + if(bz->avail_out == 0 || (flush && len > 0)) { + hctx->bytes_out += len; + out += len; + chunkqueue_append_mem(con->write_queue, hctx->output->ptr, len+1); + bz->next_out = hctx->output->ptr; + bz->avail_out = hctx->output->size; + } + } while (bz->avail_in != 0 
|| !done); + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sdsd", + "flush: in=", in, ", out=", out); + } + if(p->conf.sync_flush) { + bz->next_out = NULL; + bz->avail_out = 0; + } + return 0; +} + +static int stream_bzip2_end(server *srv, connection *con, handler_ctx *hctx) { + plugin_data *p = hctx->plugin_data; + bz_stream *bz; + int rc; + + UNUSED(p); + UNUSED(con); + + bz = &(hctx->bz); + if(!hctx->stream_open) return 0; + hctx->stream_open = 0; + + if ((rc = BZ2_bzCompressEnd(bz)) != BZ_OK) { + if(rc == BZ_DATA_ERROR) return 0; + log_error_write(srv, __FILE__, __LINE__, "sd", + "BZ2_bzCompressEnd error ret=", rc); + return -1; + } + return 0; +} + +#endif + +static int mod_deflate_compress(server *srv, connection *con, handler_ctx *hctx, unsigned char *start, off_t st_size) { + int ret = -1; + if(st_size == 0) return 0; + switch(hctx->compression_type) { +#ifdef USE_ZLIB + case HTTP_ACCEPT_ENCODING_GZIP: + case HTTP_ACCEPT_ENCODING_DEFLATE: + ret = stream_deflate_compress(srv, con, hctx, start, st_size); + break; +#endif +#ifdef USE_BZ2LIB + case HTTP_ACCEPT_ENCODING_BZIP2: + ret = stream_bzip2_compress(srv, con, hctx, start, st_size); + break; +#endif + default: + ret = -1; + break; + } + + return ret; +} + +static int mod_deflate_stream_flush(server *srv, connection *con, handler_ctx *hctx, int end) { + int ret = -1; + if(hctx->bytes_in == 0) return 0; + switch(hctx->compression_type) { +#ifdef USE_ZLIB + case HTTP_ACCEPT_ENCODING_GZIP: + case HTTP_ACCEPT_ENCODING_DEFLATE: + ret = stream_deflate_flush(srv, con, hctx, end); + break; +#endif +#ifdef USE_BZ2LIB + case HTTP_ACCEPT_ENCODING_BZIP2: + ret = stream_bzip2_flush(srv, con, hctx, end); + break; +#endif + default: + ret = -1; + break; + } + + return ret; +} + +static int mod_deflate_stream_end(server *srv, connection *con, handler_ctx *hctx) { + int ret = -1; + switch(hctx->compression_type) { +#ifdef USE_ZLIB + case HTTP_ACCEPT_ENCODING_GZIP: + case HTTP_ACCEPT_ENCODING_DEFLATE: + ret 
= stream_deflate_end(srv, con, hctx); + break; +#endif +#ifdef USE_BZ2LIB + case HTTP_ACCEPT_ENCODING_BZIP2: + ret = stream_bzip2_end(srv, con, hctx); + break; +#endif + default: + ret = -1; + break; + } + + return ret; +} + +static int mod_deflate_file_chunk(server *srv, connection *con, handler_ctx *hctx, chunk *c, off_t st_size) { + plugin_data *p = hctx->plugin_data; + off_t abs_offset; + off_t toSend; + stat_cache_entry *sce = NULL; + off_t we_want_to_mmap = 2 MByte; + off_t we_want_to_send = st_size; + char *start = NULL; + + if (HANDLER_ERROR == stat_cache_get_entry(srv, con, c->file.name, &sce)) { + log_error_write(srv, __FILE__, __LINE__, "sb", + strerror(errno), c->file.name); + return -1; + } + + abs_offset = c->file.start + c->offset; + + if (abs_offset > sce->st.st_size) { + log_error_write(srv, __FILE__, __LINE__, "sb", + "file was shrinked:", c->file.name); + + return -1; + } + + we_want_to_send = st_size; + /* mmap the buffer + * - first mmap + * - new mmap as the we are at the end of the last one */ + if (c->file.mmap.start == MAP_FAILED || + abs_offset == (off_t)(c->file.mmap.offset + c->file.mmap.length)) { + + /* Optimizations for the future: + * + * adaptive mem-mapping + * the problem: + * we mmap() the whole file. If someone has alot large files and 32bit + * machine the virtual address area will be unrun and we will have a failing + * mmap() call. + * solution: + * only mmap 16M in one chunk and move the window as soon as we have finished + * the first 8M + * + * read-ahead buffering + * the problem: + * sending out several large files in parallel trashes the read-ahead of the + * kernel leading to long wait-for-seek times. + * solutions: (increasing complexity) + * 1. use madvise + * 2. use a internal read-ahead buffer in the chunk-structure + * 3. 
use non-blocking IO for file-transfers + * */ + + /* all mmap()ed areas are we_want_to_mmap bytes except the last which might be smaller */ + size_t to_mmap; + + /* this is a remap, move the mmap-offset */ + if (c->file.mmap.start != MAP_FAILED) { + munmap(c->file.mmap.start, c->file.mmap.length); + c->file.mmap.offset += we_want_to_mmap; + } else { + /* in case the range-offset is after the first mmap()ed area we skip the area */ + c->file.mmap.offset = 0; + + while (c->file.mmap.offset + we_want_to_mmap < c->file.start) { + c->file.mmap.offset += we_want_to_mmap; + } + } + + /* length is rel, c->offset too, assume there is no limit at the mmap-boundaries */ + to_mmap = (c->file.start + c->file.length) - c->file.mmap.offset; + if(to_mmap > we_want_to_mmap) to_mmap = we_want_to_mmap; + /* we have more to send than we can mmap() at once */ + if(we_want_to_send > to_mmap) we_want_to_send = to_mmap; + + if (-1 == c->file.fd) { /* open the file if not already open */ + if (-1 == (c->file.fd = open(c->file.name->ptr, O_RDONLY))) { + log_error_write(srv, __FILE__, __LINE__, "sbs", "open failed for:", c->file.name, strerror(errno)); + + return -1; + } +#ifdef FD_CLOEXEC + fcntl(c->file.fd, F_SETFD, FD_CLOEXEC); +#endif + } + + if (MAP_FAILED == (c->file.mmap.start = mmap(0, to_mmap, PROT_READ, MAP_SHARED, c->file.fd, c->file.mmap.offset))) { + /* close it here, otherwise we'd have to set FD_CLOEXEC */ + + log_error_write(srv, __FILE__, __LINE__, "ssbd", "mmap failed:", + strerror(errno), c->file.name, c->file.fd); + + return -1; + } + + c->file.mmap.length = to_mmap; +#ifdef LOCAL_BUFFERING + buffer_copy_string_len(c->mem, c->file.mmap.start, c->file.mmap.length); +#else +#ifdef HAVE_MADVISE + /* don't advise files < 64Kb */ + if (c->file.mmap.length > (64 KByte) && + 0 != madvise(c->file.mmap.start, c->file.mmap.length, MADV_WILLNEED)) { + log_error_write(srv, __FILE__, __LINE__, "ssbd", "madvise failed:", + strerror(errno), c->file.name, c->file.fd); + } +#endif +#endif + + /* 
chunk_reset() or chunk_free() will cleanup for us */ + } + + /* to_send = abs_mmap_end - abs_offset */ + toSend = (c->file.mmap.offset + c->file.mmap.length) - (abs_offset); + if(toSend > we_want_to_send) toSend = we_want_to_send; + + if (toSend < 0) { + log_error_write(srv, __FILE__, __LINE__, "soooo", + "toSend is negative:", + toSend, + c->file.mmap.length, + abs_offset, + c->file.mmap.offset); + assert(toSend < 0); + } + +#ifdef LOCAL_BUFFERING + start = c->mem->ptr; +#else + start = c->file.mmap.start; +#endif + + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sdsd", + "compress file chunk: offset=", (int)c->offset, + ", toSend=", (int)toSend); + } + if (mod_deflate_compress(srv, con, hctx, + (unsigned char *)start + (abs_offset - c->file.mmap.offset), toSend) < 0) { + log_error_write(srv, __FILE__, __LINE__, "s", + "compress failed."); + return -1; + } + + c->offset += toSend; + if (c->offset == c->file.length) { + /* we don't need the mmaping anymore */ + if (c->file.mmap.start != MAP_FAILED) { + munmap(c->file.mmap.start, c->file.mmap.length); + c->file.mmap.start = MAP_FAILED; + } + } + + return toSend; +} + +static int deflate_compress_cleanup(server *srv, connection *con, handler_ctx *hctx) { + plugin_data *p = hctx->plugin_data; + int rc; + + rc = mod_deflate_stream_end(srv, con, hctx); + if(rc < 0) { + log_error_write(srv, __FILE__, __LINE__, "s", "error closing stream"); + } + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sdsd", + " in:", hctx->bytes_in, + " out:", hctx->bytes_out); + } + + /* cleanup compression state */ + if(hctx->output != p->tmp_buf) { + buffer_free(hctx->output); + } + handler_ctx_free(hctx); + con->plugin_ctx[p->id] = NULL; + + return 0; +} + +static handler_t deflate_compress_response(server *srv, connection *con, handler_ctx *hctx, int end) { + plugin_data *p = hctx->plugin_data; + chunk *c; + size_t chunks_written = 0; + int chunk_finished = 0; + int len = 0; + int rc=-1; + int close_stream 
= 0; + int out = 0; + int max = 0; + + /* move all chunk from write_queue into our in_queue */ + chunkqueue_append_chunkqueue(hctx->in_queue, con->write_queue); + + len = chunkqueue_length(hctx->in_queue); + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sd", + "compress: in_queue len=", len); + } + /* calculate max bytes to compress for this call. */ + if(!end) { + max = p->conf.work_block_size * 1024; + if(max == 0 || max > len) max = len; + } else { + max = len; + } + + /* Compress chunks from in_queue into chunks for write_queue */ + for(c = hctx->in_queue->first; c && out < max; c = c->next) { + chunk_finished = 0; + len = 0; + + switch(c->type) { + case MEM_CHUNK: + len = c->mem->used - 1; + if(len > (max - out)) len = max - out; + if (mod_deflate_compress(srv, con, hctx, (unsigned char *)c->mem->ptr, len) < 0) { + log_error_write(srv, __FILE__, __LINE__, "s", + "compress failed."); + return HANDLER_ERROR; + } + c->offset += len; + out += len; + if (c->offset == c->mem->used - 1) { + chunk_finished = 1; + chunks_written++; + } + break; + case FILE_CHUNK: + len = c->file.length - c->offset; + if(len > (max - out)) len = max - out; + if ((len = mod_deflate_file_chunk(srv, con, hctx, c, len)) < 0) { + log_error_write(srv, __FILE__, __LINE__, "s", + "compress file chunk failed."); + return HANDLER_ERROR; + } + out += len; + if (c->offset == c->file.length) { + chunk_finished = 1; + chunks_written++; + } + break; + default: + + log_error_write(srv, __FILE__, __LINE__, "ds", c, "type not known"); + + return HANDLER_ERROR; + } + if(!chunk_finished) break; + } + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sd", + "compressed bytes:", out); + } + hctx->in_queue->bytes_out += out; + + if(chunks_written > 0) { + chunkqueue_remove_finished_chunks(hctx->in_queue); + } + + close_stream = (con->file_finished && chunkqueue_is_empty(hctx->in_queue)); + rc = mod_deflate_stream_flush(srv, con, hctx, close_stream); + if(rc < 0) { + 
log_error_write(srv, __FILE__, __LINE__, "s", "flush error"); + } + if(close_stream || end) { + deflate_compress_cleanup(srv, con, hctx); + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sbsb", + "finished uri:", con->uri.path_raw, ", query:", con->uri.query); + } + return HANDLER_FINISHED; + } else { + if(!chunkqueue_is_empty(hctx->in_queue)) { + /* We have more data to compress. */ + joblist_append(srv, con); + } + return HANDLER_COMEBACK; + } +} + +#define PATCH(x) \ + p->conf.x = s->x; +static int mod_deflate_patch_connection(server *srv, connection *con, plugin_data *p) { + size_t i, j; + plugin_config *s = p->config_storage[0]; + + PATCH(output_buffer_size); + PATCH(mimetypes); + PATCH(compression_level); + PATCH(mem_level); + PATCH(window_size); + PATCH(min_compress_size); + PATCH(work_block_size); + PATCH(enabled); + PATCH(debug); + PATCH(bzip2); + PATCH(sync_flush); + + /* skip the first, the global context */ + for (i = 1; i < srv->config_context->used; i++) { + data_config *dc = (data_config *)srv->config_context->data[i]; + s = p->config_storage[i]; + + /* condition didn't match */ + if (!config_check_cond(srv, con, dc)) continue; + + /* merge config */ + for (j = 0; j < dc->value->used; j++) { + data_unset *du = dc->value->data[j]; + + if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.output-buffer-size"))) { + PATCH(output_buffer_size); + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.mimetypes"))) { + PATCH(mimetypes); + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.compression-level"))) { + PATCH(compression_level); + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.mem-level"))) { + PATCH(mem_level); + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.window-size"))) { + PATCH(window_size); + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.min-compress-size"))) { + PATCH(min_compress_size); + } else if (buffer_is_equal_string(du->key, 
CONST_STR_LEN("deflate.work-block-size"))) { + PATCH(work_block_size); + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.enabled"))) { + PATCH(enabled); + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.debug"))) { + PATCH(debug); + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.bzip2"))) { + PATCH(bzip2); + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.sync-flush"))) { + PATCH(sync_flush); + } + } + } + + return 0; +} +#undef PATCH + +PHYSICALPATH_FUNC(mod_deflate_handle_response_start) { + plugin_data *p = p_d; + handler_ctx *hctx; + data_string *ds; + int accept_encoding = 0; + char *value; + int srv_encodings = 0; + int matched_encodings = 0; + const char *dflt_gzip = "gzip"; + const char *dflt_deflate = "deflate"; + const char *dflt_bzip2 = "bzip2"; + const char *compression_name = NULL; + int file_len=0; + int rc=-2; + size_t m; + + /* disable compression for some http status types. */ + switch(con->http_status) { + case 100: + case 101: + case 204: + case 205: + case 304: + /* disable compression as we have no response entity */ + return HANDLER_GO_ON; + default: + break; + } + + mod_deflate_patch_connection(srv, con, p); + + /* is compression allowed */ + if(!p->conf.enabled) { + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "s", "compression disabled."); + } + return HANDLER_GO_ON; + } + + /* the response might change according to Accept-Encoding */ + if (NULL != (ds = (data_string *)array_get_element(con->response.headers, "Vary"))) { + /* append Accept-Encoding to Vary header */ + if (NULL == strstr(ds->value->ptr, "Accept-Encoding")) { + buffer_append_string(ds->value, ",Accept-Encoding"); + } + } else { + response_header_insert(srv, con, CONST_STR_LEN("Vary"), + CONST_STR_LEN("Accept-Encoding")); + } + + /* Check if response has a Content-Encoding. 
*/ + if (NULL != (ds = (data_string *)array_get_element(con->response.headers, "Content-Encoding"))) { + return HANDLER_GO_ON; + } + + /* Check Accept-Encoding for supported encoding. */ + if (NULL == (ds = (data_string *)array_get_element(con->request.headers, "Accept-Encoding"))) { + return HANDLER_GO_ON; + } + + /* get client side support encodings */ + value = ds->value->ptr; +#ifdef USE_ZLIB + if (NULL != strstr(value, "gzip")) accept_encoding |= HTTP_ACCEPT_ENCODING_GZIP; + if (NULL != strstr(value, "deflate")) accept_encoding |= HTTP_ACCEPT_ENCODING_DEFLATE; +#endif + /* if (NULL != strstr(value, "compress")) accept_encoding |= HTTP_ACCEPT_ENCODING_COMPRESS; */ +#ifdef USE_BZ2LIB + if(p->conf.bzip2) { + if (NULL != strstr(value, "bzip2")) accept_encoding |= HTTP_ACCEPT_ENCODING_BZIP2; + } +#endif + if (NULL != strstr(value, "identity")) accept_encoding |= HTTP_ACCEPT_ENCODING_IDENTITY; + + /* get server side supported ones */ +#ifdef USE_BZ2LIB + if(p->conf.bzip2) { + srv_encodings |= HTTP_ACCEPT_ENCODING_BZIP2; + } +#endif +#ifdef USE_ZLIB + srv_encodings |= HTTP_ACCEPT_ENCODING_GZIP; + srv_encodings |= HTTP_ACCEPT_ENCODING_DEFLATE; +#endif + + /* find matching encodings */ + matched_encodings = accept_encoding & srv_encodings; + if (!matched_encodings) { + return HANDLER_GO_ON; + } + + /* check if size of response is below min-compress-size */ + if(con->file_finished && con->request.http_method != HTTP_METHOD_HEAD) { + file_len = chunkqueue_length(con->write_queue); + if(file_len == 0) return HANDLER_GO_ON; + } else { + file_len = 0; + } + if(file_len > 0 && p->conf.min_compress_size > 0 && file_len < p->conf.min_compress_size) { + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sd", + "Content-Length smaller then min_compress_size: file_len=", file_len); + } + return HANDLER_GO_ON; + } + + /* Check mimetype in response header "Content-Type" */ + if (NULL != (ds = (data_string *)array_get_element(con->response.headers, "Content-Type"))) { + 
int found = 0; + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sb", + "Content-Type:", ds->value); + } + for (m = 0; m < p->conf.mimetypes->used; m++) { + data_string *mimetype = (data_string *)p->conf.mimetypes->data[m]; + + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sb", + "mime-type:", mimetype->value); + } + if (buffer_is_equal(mimetype->value, ds->value)) { + /* mimetype found */ + found = 1; + break; + } + } + if(!found && p->conf.mimetypes->used > 0) { + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sb", + "No compression for mimetype:", ds->value); + } + return HANDLER_GO_ON; + } + } + + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "s", + "enable compression."); + } + /* enable compression */ + hctx = handler_ctx_init(); + hctx->plugin_data = p; + + /* select best matching encoding */ + if (matched_encodings & HTTP_ACCEPT_ENCODING_BZIP2) { + hctx->compression_type = HTTP_ACCEPT_ENCODING_BZIP2; + compression_name = dflt_bzip2; + rc = stream_bzip2_init(srv, con, hctx); + } else if (matched_encodings & HTTP_ACCEPT_ENCODING_GZIP) { + hctx->compression_type = HTTP_ACCEPT_ENCODING_GZIP; + compression_name = dflt_gzip; + rc = stream_deflate_init(srv, con, hctx); + } else if (matched_encodings & HTTP_ACCEPT_ENCODING_DEFLATE) { + hctx->compression_type = HTTP_ACCEPT_ENCODING_DEFLATE; + compression_name = dflt_deflate; + rc = stream_deflate_init(srv, con, hctx); + } + if(rc == -1) { + log_error_write(srv, __FILE__, __LINE__, "s", + "Failed to initialize compression."); + } + if(rc < 0) { + handler_ctx_free(hctx); + return HANDLER_GO_ON; + } + + /* setup output buffer. 
*/ + if(p->conf.sync_flush || p->conf.output_buffer_size == 0) { + buffer_prepare_copy(p->tmp_buf, 32 * 1024); + hctx->output = p->tmp_buf; + } else { + hctx->output = buffer_init(); + buffer_prepare_copy(hctx->output, p->conf.output_buffer_size); + } + con->plugin_ctx[p->id] = hctx; + + /* set Content-Encoding to show selected compression type. */ + response_header_overwrite(srv, con, CONST_STR_LEN("Content-Encoding"), compression_name, strlen(compression_name)); + + /* if file finished and size less than work-block-size, then compress the content now. */ + if(con->file_finished && (p->conf.work_block_size == 0 || file_len < (p->conf.work_block_size * 1024)) && + con->request.http_method != HTTP_METHOD_HEAD) { + /* We don't have to use chunked encoding. */ + con->response.transfer_encoding = 0; + con->parsed_response &= ~(HTTP_CONTENT_LENGTH); + /* Compress all response content. */ + if(p->conf.debug) { + log_error_write(srv, __FILE__, __LINE__, "sd", + "Compress all content and use Content-Length header: uncompress len=", file_len); + } + return deflate_compress_response(srv, con, hctx, 1); + } else { + /* Remove Content-Length header. We don't know the length. */ + con->parsed_response &= ~(HTTP_CONTENT_LENGTH); + if (con->request.http_version == HTTP_VERSION_1_1) { + /* Make sure to use chunked encoding. */ + con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED; + /* Remove Content-Length header. We don't know the length. */ + con->parsed_response &= ~(HTTP_CONTENT_LENGTH); + } else { + /* HTTP/1.0 doesn't support chunked transfer encoding. 
*/ + con->response.transfer_encoding = 0; + return deflate_compress_response(srv, con, hctx, 1); + } + } + + return HANDLER_GO_ON; +} + +JOBLIST_FUNC(mod_deflate_handle_response_filter) { + plugin_data *p = p_d; + handler_ctx *hctx = con->plugin_ctx[p->id]; + + if(hctx == NULL) return HANDLER_GO_ON; + if(!hctx->stream_open) return HANDLER_GO_ON; + if(con->request.http_method == HTTP_METHOD_HEAD) return HANDLER_GO_ON; + + return deflate_compress_response(srv, con, hctx, 0); +} + +handler_t mod_deflate_cleanup(server *srv, connection *con, void *p_d) { + plugin_data *p = p_d; + handler_ctx *hctx = con->plugin_ctx[p->id]; + + if(hctx == NULL) return HANDLER_GO_ON; + + if(p->conf.debug && hctx->stream_open) { + log_error_write(srv, __FILE__, __LINE__, "sbsb", + "stream open at cleanup. uri=", con->uri.path_raw, ", query=", con->uri.query); + } + + deflate_compress_cleanup(srv, con, hctx); + + return HANDLER_GO_ON; +} + +int mod_deflate_plugin_init(plugin *p) { + p->version = LIGHTTPD_VERSION_ID; + p->name = buffer_init_string("deflate"); + + p->init = mod_deflate_init; + p->cleanup = mod_deflate_free; + p->set_defaults = mod_deflate_setdefaults; + p->connection_reset = mod_deflate_cleanup; + p->handle_connection_close = mod_deflate_cleanup; + p->handle_response_start = mod_deflate_handle_response_start; + p->handle_response_filter = mod_deflate_handle_response_filter; + + p->data = NULL; + + return 0; +} diff -Naur lighttpd-1.4.10.orig/src/plugin.c lighttpd-1.4.10/src/plugin.c --- lighttpd-1.4.10.orig/src/plugin.c 2006-02-08 04:00:54.000000000 -0800 +++ lighttpd-1.4.10/src/plugin.c 2006-02-16 22:25:16.514345356 -0800 @@ -40,6 +40,8 @@ PLUGIN_FUNC_HANDLE_SIGHUP, PLUGIN_FUNC_HANDLE_SUBREQUEST, PLUGIN_FUNC_HANDLE_SUBREQUEST_START, + PLUGIN_FUNC_HANDLE_RESPONSE_START, + PLUGIN_FUNC_HANDLE_RESPONSE_FILTER, PLUGIN_FUNC_HANDLE_JOBLIST, PLUGIN_FUNC_HANDLE_DOCROOT, PLUGIN_FUNC_HANDLE_PHYSICAL, @@ -266,6 +268,8 @@ PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_CONNECTION_CLOSE, 
handle_connection_close) PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_SUBREQUEST, handle_subrequest) PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_SUBREQUEST_START, handle_subrequest_start) +PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_RESPONSE_START, handle_response_start) +PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_RESPONSE_FILTER, handle_response_filter) PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_JOBLIST, handle_joblist) PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_DOCROOT, handle_docroot) PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_PHYSICAL, handle_physical) @@ -395,6 +399,8 @@ PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_SIGHUP, handle_sighup); PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_SUBREQUEST, handle_subrequest); PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_SUBREQUEST_START, handle_subrequest_start); + PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_RESPONSE_START, handle_response_start); + PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_RESPONSE_FILTER, handle_response_filter); PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_JOBLIST, handle_joblist); PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_DOCROOT, handle_docroot); PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_PHYSICAL, handle_physical); diff -Naur lighttpd-1.4.10.orig/src/plugin.h lighttpd-1.4.10/src/plugin.h --- lighttpd-1.4.10.orig/src/plugin.h 2005-08-15 02:28:56.000000000 -0700 +++ lighttpd-1.4.10/src/plugin.h 2006-02-16 22:25:16.514345356 -0800 @@ -54,6 +54,8 @@ * has to be found */ handler_t (* handle_subrequest) (server *srv, connection *con, void *p_d); /* */ + handler_t (* handle_response_start) (server *srv, connection *con, void *p_d); /* before response headers are written */ + handler_t (* handle_response_filter) (server *srv, connection *con, void *p_d); /* response content filter */ handler_t (* connection_reset) (server *srv, connection *con, void *p_d); /* */ void *data; @@ -68,6 +70,8 @@ handler_t plugins_call_handle_uri_clean(server *srv, connection *con); handler_t plugins_call_handle_subrequest_start(server *srv, connection *con); handler_t plugins_call_handle_subrequest(server *srv, connection *con); +handler_t 
plugins_call_handle_response_start(server *srv, connection *con); +handler_t plugins_call_handle_response_filter(server *srv, connection *con); handler_t plugins_call_handle_request_done(server *srv, connection *con); handler_t plugins_call_handle_docroot(server *srv, connection *con); handler_t plugins_call_handle_physical(server *srv, connection *con); diff -Naur lighttpd-1.4.10.orig/src/response.c lighttpd-1.4.10/src/response.c --- lighttpd-1.4.10.orig/src/response.c 2006-02-08 04:01:01.000000000 -0800 +++ lighttpd-1.4.10/src/response.c 2006-02-16 22:25:16.515345187 -0800 @@ -31,7 +31,7 @@ int have_date = 0; int have_server = 0; - b = chunkqueue_get_prepend_buffer(con->write_queue); + b = chunkqueue_get_prepend_buffer(con->output_queue); if (con->request.http_version == HTTP_VERSION_1_1) { BUFFER_COPY_STRING_CONST(b, "HTTP/1.1 "); diff -Naur lighttpd-1.4.10.orig/src/server.c lighttpd-1.4.10/src/server.c --- lighttpd-1.4.10.orig/src/server.c 2006-02-01 03:50:02.000000000 -0800 +++ lighttpd-1.4.10/src/server.c 2006-02-16 22:25:16.516345019 -0800 @@ -173,6 +173,9 @@ srv->joblist = calloc(1, sizeof(*srv->joblist)); assert(srv->joblist); + srv->joblist_prev = calloc(1, sizeof(*srv->joblist)); + assert(srv->joblist_prev); + srv->fdwaitqueue = calloc(1, sizeof(*srv->fdwaitqueue)); assert(srv->fdwaitqueue); @@ -259,6 +262,7 @@ #undef CLEAN joblist_free(srv, srv->joblist); + joblist_free(srv, srv->joblist_prev); fdwaitqueue_free(srv, srv->fdwaitqueue); if (srv->stat_cache) { @@ -1016,6 +1020,7 @@ /* main-loop */ while (!srv_shutdown) { int n; + int timeout; size_t ndx; time_t min_ts; @@ -1243,7 +1248,12 @@ } } - if ((n = fdevent_poll(srv->ev, 1000)) > 0) { + if(srv->joblist->used > 0) { + timeout = 500; + } else { + timeout = 1000; + } + if ((n = fdevent_poll(srv->ev, timeout)) > 0) { /* n is the number of events */ int revents; int fd_ndx; @@ -1291,25 +1301,29 @@ strerror(errno)); } - for (ndx = 0; ndx < srv->joblist->used; ndx++) { - connection *con = 
srv->joblist->ptr[ndx]; - handler_t r; - - connection_state_machine(srv, con); - - switch(r = plugins_call_handle_joblist(srv, con)) { - case HANDLER_FINISHED: - case HANDLER_GO_ON: - break; - default: - log_error_write(srv, __FILE__, __LINE__, "d", r); - break; + if(srv->joblist->used > 0) { + connections *joblist = srv->joblist; + /* switch joblist queues. */ + srv->joblist = srv->joblist_prev; + srv->joblist_prev = joblist; + for (ndx = 0; ndx < joblist->used; ndx++) { + connection *con = joblist->ptr[ndx]; + handler_t r; + + con->in_joblist = 0; + connection_state_machine(srv, con); + + switch(r = plugins_call_handle_joblist(srv, con)) { + case HANDLER_FINISHED: + case HANDLER_GO_ON: + break; + default: + log_error_write(srv, __FILE__, __LINE__, "d", r); + break; + } } - - con->in_joblist = 0; + joblist->used = 0; } - - srv->joblist->used = 0; } if (srv->srvconf.pid_file->used &&