1 diff -Naur lighttpd-1.4.10.orig/src/Makefile.am lighttpd-1.4.10/src/Makefile.am
2 --- lighttpd-1.4.10.orig/src/Makefile.am 2006-01-04 06:08:03.000000000 -0800
3 +++ lighttpd-1.4.10/src/Makefile.am 2006-02-16 22:25:16.504347041 -0800
5 mod_ssi_expr.c: mod_ssi_exprparser.h
7 common_src=buffer.c log.c \
9 + keyvalue.c chunk.c chunk_encode.c \
10 http_chunk.c stream.c fdevent.c \
11 stat_cache.c plugin.c joblist.c etag.c array.c \
12 data_string.c data_count.c data_array.c \
14 mod_accesslog_la_LDFLAGS = -module -export-dynamic -avoid-version -no-undefined
15 mod_accesslog_la_LIBADD = $(common_libadd)
17 +lib_LTLIBRARIES += mod_deflate.la
18 +mod_deflate_la_SOURCES = mod_deflate.c
19 +mod_deflate_la_LDFLAGS = -module -export-dynamic -avoid-version -no-undefined
20 +mod_deflate_la_LIBADD = $(Z_LIB) $(BZ_LIB) $(common_libadd)
23 hdr = server.h buffer.h network.h log.h keyvalue.h \
24 - response.h request.h fastcgi.h chunk.h \
25 + response.h request.h fastcgi.h chunk.h chunk_encode.h \
26 settings.h http_chunk.h http_auth_digest.h \
27 md5.h http_auth.h stream.h \
28 fdevent.h connections.h base.h stat_cache.h \
29 diff -Naur lighttpd-1.4.10.orig/src/base.h lighttpd-1.4.10/src/base.h
30 --- lighttpd-1.4.10.orig/src/base.h 2006-01-11 06:51:04.000000000 -0800
31 +++ lighttpd-1.4.10/src/base.h 2006-02-16 22:25:16.504347041 -0800
36 + int end_chunk; /* used for chunked transfer encoding. */
38 - chunkqueue *write_queue; /* a large queue for low-level write ( HTTP response ) [ file, mem ] */
39 + chunkqueue *write_queue; /* a large queue for HTTP response content [ file, mem ] */
40 + chunkqueue *output_queue; /* a large queue for low-level write ( HTTP response ) [ file, mem ] */
41 chunkqueue *read_queue; /* a small queue for low-level read ( HTTP request ) [ mem ] */
42 chunkqueue *request_content_queue; /* takes request-content into tempfile if necessary [ tempfile, mem ]*/
48 + connections *joblist_prev;
49 connections *fdwaitqueue;
51 stat_cache *stat_cache;
52 diff -Naur lighttpd-1.4.10.orig/src/chunk.c lighttpd-1.4.10/src/chunk.c
53 --- lighttpd-1.4.10.orig/src/chunk.c 2005-11-18 05:18:19.000000000 -0800
54 +++ lighttpd-1.4.10/src/chunk.c 2006-02-16 22:25:16.505346873 -0800
59 +int chunkqueue_append_chunkqueue(chunkqueue *cq, chunkqueue *src) {
60 + if(src == NULL) return 0;
61 + chunkqueue_append_chunk(cq, src->first);
62 + cq->last = src->last;
69 buffer * chunkqueue_get_prepend_buffer(chunkqueue *cq) {
72 diff -Naur lighttpd-1.4.10.orig/src/chunk.h lighttpd-1.4.10/src/chunk.h
73 --- lighttpd-1.4.10.orig/src/chunk.h 2005-10-31 23:32:21.000000000 -0800
74 +++ lighttpd-1.4.10/src/chunk.h 2006-02-16 22:25:16.505346873 -0800
76 int chunkqueue_append_file(chunkqueue *c, buffer *fn, off_t offset, off_t len);
77 int chunkqueue_append_mem(chunkqueue *c, const char *mem, size_t len);
78 int chunkqueue_append_buffer(chunkqueue *c, buffer *mem);
79 +int chunkqueue_append_chunkqueue(chunkqueue *cq, chunkqueue *src);
80 int chunkqueue_prepend_buffer(chunkqueue *c, buffer *mem);
82 buffer * chunkqueue_get_append_buffer(chunkqueue *c);
83 diff -Naur lighttpd-1.4.10.orig/src/chunk_encode.c lighttpd-1.4.10/src/chunk_encode.c
84 --- lighttpd-1.4.10.orig/src/chunk_encode.c 1969-12-31 16:00:00.000000000 -0800
85 +++ lighttpd-1.4.10/src/chunk_encode.c 2006-02-16 22:25:16.505346873 -0800
88 + * the HTTP chunk-API
93 +#include <sys/types.h>
94 +#include <sys/stat.h>
106 +#include "chunk_encode.h"
109 +static int chunk_encode_append_len(chunkqueue *cq, size_t len) {
110 + size_t i, olen = len, j;
113 + /*b = srv->tmp_chunk_len;*/
114 + /*b = buffer_init();*/
115 + b = chunkqueue_get_append_buffer(cq);
118 + buffer_copy_string(b, "0");
120 + for (i = 0; i < 8 && len; i++) {
124 + /* i is the number of hex digits we have */
125 + buffer_prepare_copy(b, i + 1);
127 + for (j = i-1, len = olen; j+1 > 0; j--) {
128 + b->ptr[j] = (len & 0xf) + (((len & 0xf) <= 9) ? '0' : 'a' - 10);
132 + b->ptr[b->used++] = '\0';
135 + buffer_append_string(b, "\r\n");
137 + chunkqueue_append_buffer(cq, b);
145 +int chunk_encode_append_file(chunkqueue *cq, buffer *fn, off_t offset, off_t len) {
146 + if (!cq) return -1;
147 + if (len == 0) return 0;
149 + chunk_encode_append_len(cq, len);
151 + chunkqueue_append_file(cq, fn, offset, len);
153 + chunkqueue_append_mem(cq, "\r\n", 2 + 1);
158 +int chunk_encode_append_buffer(chunkqueue *cq, buffer *mem) {
159 + if (!cq) return -1;
160 + if (mem->used <= 1) return 0;
162 + chunk_encode_append_len(cq, mem->used - 1);
164 + chunkqueue_append_buffer(cq, mem);
166 + chunkqueue_append_mem(cq, "\r\n", 2 + 1);
171 +int chunk_encode_append_mem(chunkqueue *cq, const char * mem, size_t len) {
172 + if (!cq) return -1;
173 + if (len <= 1) return 0;
175 + chunk_encode_append_len(cq, len - 1);
177 + chunkqueue_append_mem(cq, mem, len);
179 + chunkqueue_append_mem(cq, "\r\n", 2 + 1);
184 +int chunk_encode_append_queue(chunkqueue *cq, chunkqueue *src) {
185 + int len = chunkqueue_length(src);
186 + if (!cq) return -1;
187 + if (len == 0) return 0;
189 + chunk_encode_append_len(cq, len);
191 + chunkqueue_append_chunkqueue(cq, src);
193 + chunkqueue_append_mem(cq, "\r\n", 2 + 1);
198 +int chunk_encode_end(chunkqueue *cq) {
199 + chunk_encode_append_len(cq, 0);
200 + chunkqueue_append_mem(cq, "\r\n", 2 + 1);
204 diff -Naur lighttpd-1.4.10.orig/src/chunk_encode.h lighttpd-1.4.10/src/chunk_encode.h
205 --- lighttpd-1.4.10.orig/src/chunk_encode.h 1969-12-31 16:00:00.000000000 -0800
206 +++ lighttpd-1.4.10/src/chunk_encode.h 2006-02-16 22:25:16.506346704 -0800
208 +#ifndef _CHUNK_ENCODE_H_
209 +#define _CHUNK_ENCODE_H_
212 +#include <sys/types.h>
214 +int chunk_encode_append_mem(chunkqueue *cq, const char * mem, size_t len);
215 +int chunk_encode_append_buffer(chunkqueue *cq, buffer *mem);
216 +int chunk_encode_append_file(chunkqueue *cq, buffer *fn, off_t offset, off_t len);
217 +int chunk_encode_append_queue(chunkqueue *cq, chunkqueue *src);
218 +int chunk_encode_end(chunkqueue *cq);
221 diff -Naur lighttpd-1.4.10.orig/src/connections.c lighttpd-1.4.10/src/connections.c
222 --- lighttpd-1.4.10.orig/src/connections.c 2006-02-08 04:27:20.000000000 -0800
223 +++ lighttpd-1.4.10/src/connections.c 2006-02-16 22:25:16.507346536 -0800
225 #include "response.h"
227 #include "http_chunk.h"
228 +#include "chunk_encode.h"
229 #include "stat_cache.h"
236 +int connection_queue_is_empty(connection *con) {
237 + if(!chunkqueue_is_empty(con->write_queue)) return 0;
238 + if(!chunkqueue_is_empty(con->output_queue)) return 0;
243 static void dump_packet(const unsigned char *data, size_t len) {
246 con->file_finished = 1;
248 chunkqueue_reset(con->write_queue);
249 + chunkqueue_reset(con->output_queue);
253 @@ -472,12 +480,27 @@
254 /* disable chunked encoding again as we have no body */
255 con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED;
256 chunkqueue_reset(con->write_queue);
257 + chunkqueue_reset(con->output_queue);
259 con->file_finished = 1;
264 + /* Allow filter plugins to change response headers before they are written. */
265 + switch(plugins_call_handle_response_start(srv, con)) {
266 + case HANDLER_GO_ON:
267 + case HANDLER_FINISHED:
268 + /* response start is finished */
271 + /* something strange happened */
272 + log_error_write(srv, __FILE__, __LINE__, "s", "Filter plugin failed.");
273 + connection_set_state(srv, con, CON_STATE_ERROR);
274 + joblist_append(srv, con);
278 if (con->file_finished) {
279 /* we have all the content and chunked encoding is not used, set a content-length */
283 if (con->request.http_method == HTTP_METHOD_HEAD) {
284 chunkqueue_reset(con->write_queue);
285 + chunkqueue_reset(con->output_queue);
288 http_response_write_header(srv, con);
289 @@ -525,11 +549,57 @@
292 static int connection_handle_write(server *srv, connection *con) {
293 - switch(network_write_chunkqueue(srv, con, con->write_queue)) {
297 + /* Allow filter plugins to modify response content */
298 + switch(plugins_call_handle_response_filter(srv, con)) {
299 + case HANDLER_GO_ON:
300 + finished = con->file_finished;
301 + /* response content not changed */
303 + case HANDLER_COMEBACK:
304 + /* response filter has more work */
307 + case HANDLER_FINISHED:
308 + /* response filter is finished */
312 + /* something strange happened */
313 + log_error_write(srv, __FILE__, __LINE__, "s", "Filter plugin failed.");
314 + connection_set_state(srv, con, CON_STATE_ERROR);
315 + joblist_append(srv, con);
320 + /* move chunks from write_queue to output_queue. */
321 + if (con->request.http_method == HTTP_METHOD_HEAD) {
322 + chunkqueue_reset(con->write_queue);
324 + len = chunkqueue_length(con->write_queue);
325 + if(con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) {
326 + chunk_encode_append_queue(con->output_queue, con->write_queue);
327 + if(finished && !con->end_chunk) {
328 + con->end_chunk = 1;
329 + chunk_encode_end(con->output_queue);
332 + chunkqueue_append_chunkqueue(con->output_queue, con->write_queue);
334 + con->write_queue->bytes_out += len;
336 + /* write chunks from output_queue to network */
337 + switch(network_write_chunkqueue(srv, con, con->output_queue)) {
339 - if (con->file_finished) {
341 connection_set_state(srv, con, CON_STATE_RESPONSE_END);
342 joblist_append(srv, con);
344 + /* not finished yet -> WRITE */
345 + con->is_writable = 1;
348 case -1: /* error on our side */
352 con->write_queue = chunkqueue_init();
353 + con->output_queue = chunkqueue_init();
354 con->read_queue = chunkqueue_init();
355 con->request_content_queue = chunkqueue_init();
356 chunkqueue_set_tempdirs(con->request_content_queue, srv->srvconf.upload_tempdirs);
358 connection_reset(srv, con);
360 chunkqueue_free(con->write_queue);
361 + chunkqueue_free(con->output_queue);
362 chunkqueue_free(con->read_queue);
363 chunkqueue_free(con->request_content_queue);
364 array_free(con->request.headers);
366 con->http_status = 0;
367 con->file_finished = 0;
368 con->file_started = 0;
369 + con->end_chunk = 0;
370 con->got_response = 0;
372 con->parsed_response = 0;
374 array_reset(con->environment);
376 chunkqueue_reset(con->write_queue);
377 + chunkqueue_reset(con->output_queue);
378 chunkqueue_reset(con->request_content_queue);
380 /* the plugins should cleanup themself */
381 @@ -1178,7 +1252,6 @@
384 if (con->state == CON_STATE_WRITE &&
385 - !chunkqueue_is_empty(con->write_queue) &&
388 if (-1 == connection_handle_write(srv, con)) {
389 @@ -1573,15 +1646,15 @@
392 /* only try to write if we have something in the queue */
393 - if (!chunkqueue_is_empty(con->write_queue)) {
395 + if (!connection_queue_is_empty(con)) {
396 log_error_write(srv, __FILE__, __LINE__, "dsd",
399 - con->write_queue->used);
401 + con->output_queue->used);
403 - if (!chunkqueue_is_empty(con->write_queue) && con->is_writable) {
405 + if (con->is_writable) {
406 if (-1 == connection_handle_write(srv, con)) {
407 log_error_write(srv, __FILE__, __LINE__, "ds",
409 @@ -1691,9 +1764,9 @@
410 * - if we have data to write
411 * - if the socket is not writable yet
413 - if (!chunkqueue_is_empty(con->write_queue) &&
414 - (con->is_writable == 0) &&
415 - (con->traffic_limit_reached == 0)) {
416 + if ((con->is_writable == 0) &&
417 + (con->traffic_limit_reached == 0) &&
418 + !connection_queue_is_empty(con)) {
419 fdevent_event_add(srv->ev, &(con->fde_ndx), con->fd, FDEVENT_OUT);
421 fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
422 diff -Naur lighttpd-1.4.10.orig/src/http_chunk.c lighttpd-1.4.10/src/http_chunk.c
423 --- lighttpd-1.4.10.orig/src/http_chunk.c 2005-08-10 15:26:50.000000000 -0700
424 +++ lighttpd-1.4.10/src/http_chunk.c 2006-02-16 22:25:16.508346367 -0800
427 cq = con->write_queue;
429 - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) {
430 - http_chunk_append_len(srv, con, len);
434 chunkqueue_append_file(cq, fn, offset, len);
436 - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED && len > 0) {
437 - chunkqueue_append_mem(cq, "\r\n", 2 + 1);
445 cq = con->write_queue;
447 - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) {
448 - http_chunk_append_len(srv, con, mem->used - 1);
452 chunkqueue_append_buffer(cq, mem);
454 - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED && mem->used > 0) {
455 - chunkqueue_append_mem(cq, "\r\n", 2 + 1);
462 cq = con->write_queue;
465 - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) {
466 - http_chunk_append_len(srv, con, 0);
467 - chunkqueue_append_mem(cq, "\r\n", 2 + 1);
469 - chunkqueue_append_mem(cq, "", 1);
474 - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) {
475 - http_chunk_append_len(srv, con, len - 1);
478 chunkqueue_append_mem(cq, mem, len);
480 - if (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) {
481 - chunkqueue_append_mem(cq, "\r\n", 2 + 1);
487 diff -Naur lighttpd-1.4.10.orig/src/joblist.c lighttpd-1.4.10/src/joblist.c
488 --- lighttpd-1.4.10.orig/src/joblist.c 2005-08-10 15:26:41.000000000 -0700
489 +++ lighttpd-1.4.10/src/joblist.c 2006-02-16 22:25:16.508346367 -0800
492 int joblist_append(server *srv, connection *con) {
493 if (con->in_joblist) return 0;
494 + con->in_joblist = 1;
496 if (srv->joblist->size == 0) {
497 srv->joblist->size = 16;
498 diff -Naur lighttpd-1.4.10.orig/src/mod_deflate.c lighttpd-1.4.10/src/mod_deflate.c
499 --- lighttpd-1.4.10.orig/src/mod_deflate.c 1969-12-31 16:00:00.000000000 -0800
500 +++ lighttpd-1.4.10/src/mod_deflate.c 2006-02-17 23:26:45.885437687 -0800
502 +#include <sys/types.h>
503 +#include <sys/stat.h>
517 +#include "response.h"
518 +#include "joblist.h"
519 +#include "stat_cache.h"
526 +#if defined HAVE_ZLIB_H && defined HAVE_LIBZ
530 +# define Z_DEFAULT_COMPRESSION 1
533 +#if defined HAVE_BZLIB_H && defined HAVE_LIBBZ2
535 +/* we don't need stdio interface */
536 +# define BZ_NO_STDIO
540 +#include "sys-mmap.h"
542 +/* request: accept-encoding */
543 +#define HTTP_ACCEPT_ENCODING_IDENTITY BV(0)
544 +#define HTTP_ACCEPT_ENCODING_GZIP BV(1)
545 +#define HTTP_ACCEPT_ENCODING_DEFLATE BV(2)
546 +#define HTTP_ACCEPT_ENCODING_COMPRESS BV(3)
547 +#define HTTP_ACCEPT_ENCODING_BZIP2 BV(4)
549 +#define KByte * 1024
550 +#define MByte * 1024 KByte
551 +#define GByte * 1024 MByte
554 + unsigned short debug;
555 + unsigned short enabled;
556 + unsigned short bzip2;
557 + unsigned short sync_flush;
558 + unsigned short output_buffer_size;
559 + unsigned short min_compress_size;
560 + unsigned short work_block_size;
562 + short compression_level;
571 + plugin_config **config_storage;
572 + plugin_config conf;
578 + chunkqueue *in_queue;
580 + /* compression type & state */
581 + int compression_type;
586 + unsigned short gzip_header;
591 + plugin_data *plugin_data;
594 +static handler_ctx *handler_ctx_init() {
597 + hctx = calloc(1, sizeof(*hctx));
598 + hctx->in_queue = chunkqueue_init();
603 +static void handler_ctx_free(handler_ctx *hctx) {
604 + chunkqueue_free(hctx->in_queue);
608 +INIT_FUNC(mod_deflate_init) {
611 + p = calloc(1, sizeof(*p));
613 + p->tmp_buf = buffer_init();
618 +FREE_FUNC(mod_deflate_free) {
619 + plugin_data *p = p_d;
623 + if (!p) return HANDLER_GO_ON;
625 + if (p->config_storage) {
627 + for (i = 0; i < srv->config_context->used; i++) {
628 + plugin_config *s = p->config_storage[i];
632 + array_free(s->mimetypes);
636 + free(p->config_storage);
639 + buffer_free(p->tmp_buf);
643 + return HANDLER_GO_ON;
646 +SETDEFAULTS_FUNC(mod_deflate_setdefaults) {
647 + plugin_data *p = p_d;
650 + config_values_t cv[] = {
651 + { "deflate.output-buffer-size", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION },
652 + { "deflate.mimetypes", NULL, T_CONFIG_ARRAY, T_CONFIG_SCOPE_CONNECTION },
653 + { "deflate.compression-level", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION },
654 + { "deflate.mem-level", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION },
655 + { "deflate.window-size", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION },
656 + { "deflate.min-compress-size", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION },
657 + { "deflate.work-block-size", NULL, T_CONFIG_SHORT, T_CONFIG_SCOPE_CONNECTION },
658 + { "deflate.enabled", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION },
659 + { "deflate.debug", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION },
660 + { "deflate.bzip2", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION },
661 + { "deflate.sync-flush", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION },
662 + { NULL, NULL, T_CONFIG_UNSET, T_CONFIG_SCOPE_UNSET }
665 + p->config_storage = calloc(1, srv->config_context->used * sizeof(specific_config *));
667 + for (i = 0; i < srv->config_context->used; i++) {
670 + s = calloc(1, sizeof(plugin_config));
675 + s->output_buffer_size = 0;
677 + s->window_size = 15;
678 + s->min_compress_size = 0;
679 + s->work_block_size = 2048;
680 + s->compression_level = Z_DEFAULT_COMPRESSION;
681 + s->mimetypes = array_init();
683 + cv[0].destination = &(s->output_buffer_size);
684 + cv[1].destination = s->mimetypes;
685 + cv[2].destination = &(s->compression_level);
686 + cv[3].destination = &(s->mem_level);
687 + cv[4].destination = &(s->window_size);
688 + cv[5].destination = &(s->min_compress_size);
689 + cv[6].destination = &(s->work_block_size);
690 + cv[7].destination = &(s->enabled);
691 + cv[8].destination = &(s->debug);
692 + cv[9].destination = &(s->bzip2);
693 + cv[10].destination = &(s->sync_flush);
695 + p->config_storage[i] = s;
697 + if (0 != config_insert_values_global(srv, ((data_config *)srv->config_context->data[i])->value, cv)) {
698 + return HANDLER_ERROR;
701 + if((s->compression_level < 1 || s->compression_level > 9) &&
702 + s->compression_level != Z_DEFAULT_COMPRESSION) {
703 + log_error_write(srv, __FILE__, __LINE__, "sd",
704 + "compression-level must be between 1 and 9:", s->compression_level);
705 + return HANDLER_ERROR;
708 + if(s->mem_level < 1 || s->mem_level > 9) {
709 + log_error_write(srv, __FILE__, __LINE__, "sd",
710 + "mem-level must be between 1 and 9:", s->mem_level);
711 + return HANDLER_ERROR;
714 + if(s->window_size < 1 || s->window_size > 15) {
715 + log_error_write(srv, __FILE__, __LINE__, "sd",
716 + "window-size must be between 1 and 15:", s->window_size);
717 + return HANDLER_ERROR;
719 + s->window_size = 0 - s->window_size;
721 + if(s->sync_flush) {
722 + s->output_buffer_size = 0;
726 + return HANDLER_GO_ON;
731 +/* Copied gzip_header from apache 2.2's mod_deflate.c */
732 +/* RFC 1952 Section 2.3 defines the gzip header:
734 + * +---+---+---+---+---+---+---+---+---+---+
735 + * |ID1|ID2|CM |FLG| MTIME |XFL|OS |
736 + * +---+---+---+---+---+---+---+---+---+---+
738 +static const char gzip_header[10] =
739 +{ '\037', '\213', Z_DEFLATED, 0,
740 + 0, 0, 0, 0, /* mtime */
741 + 0, 0x03 /* Unix OS_CODE */
743 +static int stream_deflate_init(server *srv, connection *con, handler_ctx *hctx) {
744 + plugin_data *p = hctx->plugin_data;
751 + z->zalloc = Z_NULL;
753 + z->opaque = Z_NULL;
756 + z->next_out = NULL;
759 + if(p->conf.debug) {
760 + log_error_write(srv, __FILE__, __LINE__, "sd",
761 + "output-buffer-size:", p->conf.output_buffer_size);
762 + log_error_write(srv, __FILE__, __LINE__, "sd",
763 + "compression-level:", p->conf.compression_level);
764 + log_error_write(srv, __FILE__, __LINE__, "sd",
765 + "mem-level:", p->conf.mem_level);
766 + log_error_write(srv, __FILE__, __LINE__, "sd",
767 + "window-size:", p->conf.window_size);
768 + log_error_write(srv, __FILE__, __LINE__, "sd",
769 + "min-compress-size:", p->conf.min_compress_size);
770 + log_error_write(srv, __FILE__, __LINE__, "sd",
771 + "work-block-size:", p->conf.work_block_size);
773 + if (Z_OK != deflateInit2(z,
774 + p->conf.compression_level,
776 + p->conf.window_size, /* suppress zlib-header */
778 + Z_DEFAULT_STRATEGY)) {
781 + hctx->stream_open = 1;
786 +static int stream_deflate_compress(server *srv, connection *con, handler_ctx *hctx, unsigned char *start, off_t st_size) {
787 + plugin_data *p = hctx->plugin_data;
790 + int in = 0, out = 0;
795 + if(z->next_out == NULL) {
796 + z->next_out = (unsigned char *)hctx->output->ptr;
797 + z->avail_out = hctx->output->size;
800 + if(hctx->compression_type == HTTP_ACCEPT_ENCODING_GZIP) {
801 + if(hctx->gzip_header == 0) {
802 + hctx->gzip_header = 1;
803 + /* copy gzip header into output buffer */
804 + buffer_copy_memory(hctx->output, gzip_header, sizeof(gzip_header));
805 + if(p->conf.debug) {
806 + log_error_write(srv, __FILE__, __LINE__, "sd",
807 + "gzip_header len=", sizeof(gzip_header));
809 + /* initialize crc32 */
810 + hctx->crc = crc32(0L, Z_NULL, 0);
811 + z->next_out = (unsigned char *)(hctx->output->ptr + sizeof(gzip_header));
812 + z->avail_out = hctx->output->size - sizeof(gzip_header);
814 + hctx->crc = crc32(hctx->crc, start, st_size);
817 + z->next_in = start;
818 + z->avail_in = st_size;
819 + hctx->bytes_in += st_size;
821 + /* compress data */
824 + if (Z_OK != deflate(z, Z_NO_FLUSH)) {
826 + hctx->stream_open = 0;
830 + if(z->avail_out == 0 || z->avail_in > 0) {
831 + len = hctx->output->size - z->avail_out;
832 + hctx->bytes_out += len;
834 + chunkqueue_append_mem(con->write_queue, hctx->output->ptr, len+1);
835 + z->next_out = (unsigned char *)hctx->output->ptr;
836 + z->avail_out = hctx->output->size;
838 + } while (z->avail_in > 0);
839 + if(p->conf.debug) {
840 + log_error_write(srv, __FILE__, __LINE__, "sdsd",
841 + "compress: in=", in, ", out=", out);
846 +static int stream_deflate_flush(server *srv, connection *con, handler_ctx *hctx, int end) {
847 + plugin_data *p = hctx->plugin_data;
853 + int in = 0, out = 0;
859 + if(z->next_out == NULL) {
860 + z->next_out = (unsigned char *)hctx->output->ptr;
861 + z->avail_out = hctx->output->size;
863 + /* compress data */
868 + rc = deflate(z, Z_FINISH);
871 + } else if (rc != Z_STREAM_END) {
873 + hctx->stream_open = 0;
877 + if(p->conf.sync_flush) {
878 + rc = deflate(z, Z_SYNC_FLUSH);
879 + } else if(z->avail_in > 0) {
880 + if(p->conf.output_buffer_size > 0) flush = 0;
881 + rc = deflate(z, Z_NO_FLUSH);
883 + if(p->conf.output_buffer_size > 0) flush = 0;
888 + hctx->stream_open = 0;
893 + len = hctx->output->size - z->avail_out;
894 + if(z->avail_out == 0 || (flush && len > 0)) {
895 + hctx->bytes_out += len;
897 + chunkqueue_append_mem(con->write_queue, hctx->output->ptr, len+1);
898 + z->next_out = (unsigned char *)hctx->output->ptr;
899 + z->avail_out = hctx->output->size;
901 + } while (z->avail_in != 0 || !done);
902 + if(p->conf.debug) {
903 + log_error_write(srv, __FILE__, __LINE__, "sdsd",
904 + "flush: in=", in, ", out=", out);
906 + if(p->conf.sync_flush) {
907 + z->next_out = NULL;
913 +static int stream_deflate_end(server *srv, connection *con, handler_ctx *hctx) {
914 + plugin_data *p = hctx->plugin_data;
921 + if(!hctx->stream_open) return 0;
922 + hctx->stream_open = 0;
924 + if(hctx->compression_type == HTTP_ACCEPT_ENCODING_GZIP && hctx->bytes_out > 0 &&
925 + hctx->bytes_out >= sizeof(gzip_header)) {
926 + /* write gzip footer */
927 + unsigned char c[8];
929 + c[0] = (hctx->crc >> 0) & 0xff;
930 + c[1] = (hctx->crc >> 8) & 0xff;
931 + c[2] = (hctx->crc >> 16) & 0xff;
932 + c[3] = (hctx->crc >> 24) & 0xff;
933 + c[4] = (z->total_in >> 0) & 0xff;
934 + c[5] = (z->total_in >> 8) & 0xff;
935 + c[6] = (z->total_in >> 16) & 0xff;
936 + c[7] = (z->total_in >> 24) & 0xff;
937 + /* append footer to write_queue */
938 + chunkqueue_append_mem(con->write_queue, (char *)c, 9);
939 + hctx->bytes_out += 8;
940 + if(p->conf.debug) {
941 + log_error_write(srv, __FILE__, __LINE__, "sd",
942 + "gzip_footer len=", 8);
946 + if ((rc = deflateEnd(z)) != Z_OK) {
947 + if(rc == Z_DATA_ERROR) return 0;
948 + if(z->msg != NULL) {
949 + log_error_write(srv, __FILE__, __LINE__, "sdss",
950 + "deflateEnd error ret=", rc, ", msg=", z->msg);
952 + log_error_write(srv, __FILE__, __LINE__, "sd",
953 + "deflateEnd error ret=", rc);
963 +static int stream_bzip2_init(server *srv, connection *con, handler_ctx *hctx) {
964 + plugin_data *p = hctx->plugin_data;
971 + bz->bzalloc = NULL;
974 + bz->total_in_lo32 = 0;
975 + bz->total_in_hi32 = 0;
976 + bz->total_out_lo32 = 0;
977 + bz->total_out_hi32 = 0;
979 + if(p->conf.debug) {
980 + log_error_write(srv, __FILE__, __LINE__, "sd",
981 + "output-buffer-size:", p->conf.output_buffer_size);
982 + log_error_write(srv, __FILE__, __LINE__, "sd",
983 + "compression-level:", p->conf.compression_level);
984 + log_error_write(srv, __FILE__, __LINE__, "sd",
985 + "mem-level:", p->conf.mem_level);
986 + log_error_write(srv, __FILE__, __LINE__, "sd",
987 + "window-size:", p->conf.window_size);
988 + log_error_write(srv, __FILE__, __LINE__, "sd",
989 + "min-compress-size:", p->conf.min_compress_size);
990 + log_error_write(srv, __FILE__, __LINE__, "sd",
991 + "work-block-size:", p->conf.work_block_size);
993 + if (BZ_OK != BZ2_bzCompressInit(bz,
994 + p->conf.compression_level, /* blocksize = 900k */
996 + 30)) { /* workFactor: default */
999 + hctx->stream_open = 1;
1004 +static int stream_bzip2_compress(server *srv, connection *con, handler_ctx *hctx, unsigned char *start, off_t st_size) {
1005 + plugin_data *p = hctx->plugin_data;
1009 + int in = 0, out = 0;
1015 + if(bz->next_out == NULL) {
1016 + bz->next_out = hctx->output->ptr;
1017 + bz->avail_out = hctx->output->size;
1020 + bz->next_in = (char *)start;
1021 + bz->avail_in = st_size;
1022 + hctx->bytes_in += st_size;
1024 + /* compress data */
1025 + in = bz->avail_in;
1027 + rc = BZ2_bzCompress(bz, BZ_RUN);
1028 + if (rc != BZ_RUN_OK) {
1029 + BZ2_bzCompressEnd(bz);
1030 + hctx->stream_open = 0;
1034 + if(bz->avail_out == 0 || bz->avail_in > 0) {
1035 + len = hctx->output->size - bz->avail_out;
1036 + hctx->bytes_out += len;
1038 + chunkqueue_append_mem(con->write_queue, hctx->output->ptr, len+1);
1039 + bz->next_out = hctx->output->ptr;
1040 + bz->avail_out = hctx->output->size;
1042 + } while (bz->avail_in > 0);
1043 + if(p->conf.debug) {
1044 + log_error_write(srv, __FILE__, __LINE__, "sdsd",
1045 + "compress: in=", in, ", out=", out);
1050 +static int stream_bzip2_flush(server *srv, connection *con, handler_ctx *hctx, int end) {
1051 + plugin_data *p = hctx->plugin_data;
1057 + int in = 0, out = 0;
1063 + if(bz->next_out == NULL) {
1064 + bz->next_out = hctx->output->ptr;
1065 + bz->avail_out = hctx->output->size;
1067 + /* compress data */
1068 + in = bz->avail_in;
1072 + rc = BZ2_bzCompress(bz, BZ_FINISH);
1073 + if (rc == BZ_FINISH_OK) {
1075 + } else if (rc != BZ_STREAM_END) {
1076 + BZ2_bzCompressEnd(bz);
1077 + hctx->stream_open = 0;
1080 + } else if(bz->avail_in > 0) {
1081 + rc = BZ2_bzCompress(bz, BZ_RUN);
1082 + if (rc != BZ_RUN_OK) {
1083 + BZ2_bzCompressEnd(bz);
1084 + hctx->stream_open = 0;
1087 + if(p->conf.output_buffer_size > 0) flush = 0;
1090 + len = hctx->output->size - bz->avail_out;
1091 + if(bz->avail_out == 0 || (flush && len > 0)) {
1092 + hctx->bytes_out += len;
1094 + chunkqueue_append_mem(con->write_queue, hctx->output->ptr, len+1);
1095 + bz->next_out = hctx->output->ptr;
1096 + bz->avail_out = hctx->output->size;
1098 + } while (bz->avail_in != 0 || !done);
1099 + if(p->conf.debug) {
1100 + log_error_write(srv, __FILE__, __LINE__, "sdsd",
1101 + "flush: in=", in, ", out=", out);
1103 + if(p->conf.sync_flush) {
1104 + bz->next_out = NULL;
1105 + bz->avail_out = 0;
1110 +static int stream_bzip2_end(server *srv, connection *con, handler_ctx *hctx) {
1111 + plugin_data *p = hctx->plugin_data;
1119 + if(!hctx->stream_open) return 0;
1120 + hctx->stream_open = 0;
1122 + if ((rc = BZ2_bzCompressEnd(bz)) != BZ_OK) {
1123 + if(rc == BZ_DATA_ERROR) return 0;
1124 + log_error_write(srv, __FILE__, __LINE__, "sd",
1125 + "BZ2_bzCompressEnd error ret=", rc);
1133 +static int mod_deflate_compress(server *srv, connection *con, handler_ctx *hctx, unsigned char *start, off_t st_size) {
1135 + if(st_size == 0) return 0;
1136 + switch(hctx->compression_type) {
1138 + case HTTP_ACCEPT_ENCODING_GZIP:
1139 + case HTTP_ACCEPT_ENCODING_DEFLATE:
1140 + ret = stream_deflate_compress(srv, con, hctx, start, st_size);
1144 + case HTTP_ACCEPT_ENCODING_BZIP2:
1145 + ret = stream_bzip2_compress(srv, con, hctx, start, st_size);
1156 +static int mod_deflate_stream_flush(server *srv, connection *con, handler_ctx *hctx, int end) {
1158 + if(hctx->bytes_in == 0) return 0;
1159 + switch(hctx->compression_type) {
1161 + case HTTP_ACCEPT_ENCODING_GZIP:
1162 + case HTTP_ACCEPT_ENCODING_DEFLATE:
1163 + ret = stream_deflate_flush(srv, con, hctx, end);
1167 + case HTTP_ACCEPT_ENCODING_BZIP2:
1168 + ret = stream_bzip2_flush(srv, con, hctx, end);
1179 +static int mod_deflate_stream_end(server *srv, connection *con, handler_ctx *hctx) {
1181 + switch(hctx->compression_type) {
1183 + case HTTP_ACCEPT_ENCODING_GZIP:
1184 + case HTTP_ACCEPT_ENCODING_DEFLATE:
1185 + ret = stream_deflate_end(srv, con, hctx);
1189 + case HTTP_ACCEPT_ENCODING_BZIP2:
1190 + ret = stream_bzip2_end(srv, con, hctx);
1201 +static int mod_deflate_file_chunk(server *srv, connection *con, handler_ctx *hctx, chunk *c, off_t st_size) {
1202 + plugin_data *p = hctx->plugin_data;
1205 + stat_cache_entry *sce = NULL;
1206 + off_t we_want_to_mmap = 2 MByte;
1207 + off_t we_want_to_send = st_size;
1208 + char *start = NULL;
1210 + if (HANDLER_ERROR == stat_cache_get_entry(srv, con, c->file.name, &sce)) {
1211 + log_error_write(srv, __FILE__, __LINE__, "sb",
1212 + strerror(errno), c->file.name);
1216 + abs_offset = c->file.start + c->offset;
1218 + if (abs_offset > sce->st.st_size) {
1219 + log_error_write(srv, __FILE__, __LINE__, "sb",
1220 + "file was shrinked:", c->file.name);
1225 + we_want_to_send = st_size;
1226 + /* mmap the buffer
1228 + * - new mmap as we are at the end of the last one */
1229 + if (c->file.mmap.start == MAP_FAILED ||
1230 + abs_offset == (off_t)(c->file.mmap.offset + c->file.mmap.length)) {
1232 + /* Optimizations for the future:
1234 + * adaptive mem-mapping
1236 + * we mmap() the whole file. If someone has a lot of large files and a 32bit
1237 + * machine the virtual address area will be overrun and we will have a failing
1240 + * only mmap 16M in one chunk and move the window as soon as we have finished
1243 + * read-ahead buffering
1245 + * sending out several large files in parallel trashes the read-ahead of the
1246 + * kernel leading to long wait-for-seek times.
1247 + * solutions: (increasing complexity)
1249 + * 2. use a internal read-ahead buffer in the chunk-structure
1250 + * 3. use non-blocking IO for file-transfers
1253 + /* all mmap()ed areas are 512kb except the last which might be smaller */
1256 + /* this is a remap, move the mmap-offset */
1257 + if (c->file.mmap.start != MAP_FAILED) {
1258 + munmap(c->file.mmap.start, c->file.mmap.length);
1259 + c->file.mmap.offset += we_want_to_mmap;
1261 + /* in case the range-offset is after the first mmap()ed area we skip the area */
1262 + c->file.mmap.offset = 0;
1264 + while (c->file.mmap.offset + we_want_to_mmap < c->file.start) {
1265 + c->file.mmap.offset += we_want_to_mmap;
1269 + /* length is rel, c->offset too, assume there is no limit at the mmap-boundaries */
1270 + to_mmap = (c->file.start + c->file.length) - c->file.mmap.offset;
1271 + if(to_mmap > we_want_to_mmap) to_mmap = we_want_to_mmap;
1272 + /* we have more to send than we can mmap() at once */
1273 + if(we_want_to_send > to_mmap) we_want_to_send = to_mmap;
1275 + if (-1 == c->file.fd) { /* open the file if not already open */
1276 + if (-1 == (c->file.fd = open(c->file.name->ptr, O_RDONLY))) {
1277 + log_error_write(srv, __FILE__, __LINE__, "sbs", "open failed for:", c->file.name, strerror(errno));
1282 + fcntl(c->file.fd, F_SETFD, FD_CLOEXEC);
1286 + if (MAP_FAILED == (c->file.mmap.start = mmap(0, to_mmap, PROT_READ, MAP_SHARED, c->file.fd, c->file.mmap.offset))) {
1287 + /* close it here, otherwise we'd have to set FD_CLOEXEC */
1289 + log_error_write(srv, __FILE__, __LINE__, "ssbd", "mmap failed:",
1290 + strerror(errno), c->file.name, c->file.fd);
1295 + c->file.mmap.length = to_mmap;
1296 +#ifdef LOCAL_BUFFERING
1297 + buffer_copy_string_len(c->mem, c->file.mmap.start, c->file.mmap.length);
1299 +#ifdef HAVE_MADVISE
1300 + /* don't advise files < 64Kb */
1301 + if (c->file.mmap.length > (64 KByte) &&
1302 + 0 != madvise(c->file.mmap.start, c->file.mmap.length, MADV_WILLNEED)) {
1303 + log_error_write(srv, __FILE__, __LINE__, "ssbd", "madvise failed:",
1304 + strerror(errno), c->file.name, c->file.fd);
1309 + /* chunk_reset() or chunk_free() will cleanup for us */
1312 + /* to_send = abs_mmap_end - abs_offset */
1313 + toSend = (c->file.mmap.offset + c->file.mmap.length) - (abs_offset);
1314 + if(toSend > we_want_to_send) toSend = we_want_to_send;
1317 + log_error_write(srv, __FILE__, __LINE__, "soooo",
1318 + "toSend is negative:",
1320 + c->file.mmap.length,
1322 + c->file.mmap.offset);
1323 + assert(toSend < 0);
1326 +#ifdef LOCAL_BUFFERING
1327 + start = c->mem->ptr;
1329 + start = c->file.mmap.start;
1332 + if(p->conf.debug) {
1333 + log_error_write(srv, __FILE__, __LINE__, "sdsd",
1334 + "compress file chunk: offset=", (int)c->offset,
1335 + ", toSend=", (int)toSend);
1337 + if (mod_deflate_compress(srv, con, hctx,
1338 + (unsigned char *)start + (abs_offset - c->file.mmap.offset), toSend) < 0) {
1339 + log_error_write(srv, __FILE__, __LINE__, "s",
1340 + "compress failed.");
1344 + c->offset += toSend;
1345 + if (c->offset == c->file.length) {
1346 + /* we don't need the mmapping anymore */
1347 + if (c->file.mmap.start != MAP_FAILED) {
1348 + munmap(c->file.mmap.start, c->file.mmap.length);
1349 + c->file.mmap.start = MAP_FAILED;
1356 +static int deflate_compress_cleanup(server *srv, connection *con, handler_ctx *hctx) {
1357 + plugin_data *p = hctx->plugin_data;
1360 + rc = mod_deflate_stream_end(srv, con, hctx);
1362 + log_error_write(srv, __FILE__, __LINE__, "s", "error closing stream");
1364 + if(p->conf.debug) {
1365 + log_error_write(srv, __FILE__, __LINE__, "sdsd",
1366 + " in:", hctx->bytes_in,
1367 + " out:", hctx->bytes_out);
1370 + /* cleanup compression state */
1371 + if(hctx->output != p->tmp_buf) {
1372 + buffer_free(hctx->output);
1374 + handler_ctx_free(hctx);
1375 + con->plugin_ctx[p->id] = NULL;
1380 +static handler_t deflate_compress_response(server *srv, connection *con, handler_ctx *hctx, int end) {
1381 + plugin_data *p = hctx->plugin_data;
1383 + size_t chunks_written = 0;
1384 + int chunk_finished = 0;
1387 + int close_stream = 0;
1391 + /* move all chunk from write_queue into our in_queue */
1392 + chunkqueue_append_chunkqueue(hctx->in_queue, con->write_queue);
1394 + len = chunkqueue_length(hctx->in_queue);
1395 + if(p->conf.debug) {
1396 + log_error_write(srv, __FILE__, __LINE__, "sd",
1397 + "compress: in_queue len=", len);
1399 + /* calculate max bytes to compress for this call. */
1401 + max = p->conf.work_block_size * 1024;
1402 + if(max == 0 || max > len) max = len;
1407 + /* Compress chunks from in_queue into chunks for write_queue */
1408 + for(c = hctx->in_queue->first; c && out < max; c = c->next) {
1409 + chunk_finished = 0;
1414 + len = c->mem->used - 1;
1415 + if(len > (max - out)) len = max - out;
1416 + if (mod_deflate_compress(srv, con, hctx, (unsigned char *)c->mem->ptr, len) < 0) {
1417 + log_error_write(srv, __FILE__, __LINE__, "s",
1418 + "compress failed.");
1419 + return HANDLER_ERROR;
1423 + if (c->offset == c->mem->used - 1) {
1424 + chunk_finished = 1;
1429 + len = c->file.length - c->offset;
1430 + if(len > (max - out)) len = max - out;
1431 + if ((len = mod_deflate_file_chunk(srv, con, hctx, c, len)) < 0) {
1432 + log_error_write(srv, __FILE__, __LINE__, "s",
1433 + "compress file chunk failed.");
1434 + return HANDLER_ERROR;
1437 + if (c->offset == c->file.length) {
1438 + chunk_finished = 1;
1444 + log_error_write(srv, __FILE__, __LINE__, "ds", c, "type not known");
1446 + return HANDLER_ERROR;
1448 + if(!chunk_finished) break;
1450 + if(p->conf.debug) {
1451 + log_error_write(srv, __FILE__, __LINE__, "sd",
1452 + "compressed bytes:", out);
1454 + hctx->in_queue->bytes_out += out;
1456 + if(chunks_written > 0) {
1457 + chunkqueue_remove_finished_chunks(hctx->in_queue);
1460 + close_stream = (con->file_finished && chunkqueue_is_empty(hctx->in_queue));
1461 + rc = mod_deflate_stream_flush(srv, con, hctx, close_stream);
1463 + log_error_write(srv, __FILE__, __LINE__, "s", "flush error");
1465 + if(close_stream || end) {
1466 + deflate_compress_cleanup(srv, con, hctx);
1467 + if(p->conf.debug) {
1468 + log_error_write(srv, __FILE__, __LINE__, "sbsb",
1469 + "finished uri:", con->uri.path_raw, ", query:", con->uri.query);
1471 + return HANDLER_FINISHED;
1473 + if(!chunkqueue_is_empty(hctx->in_queue)) {
1474 + /* We have more data to compress. */
1475 + joblist_append(srv, con);
1477 + return HANDLER_COMEBACK;
1483 +static int mod_deflate_patch_connection(server *srv, connection *con, plugin_data *p) {
1485 + plugin_config *s = p->config_storage[0];
1487 + PATCH(output_buffer_size);
1489 + PATCH(compression_level);
1491 + PATCH(window_size);
1492 + PATCH(min_compress_size);
1493 + PATCH(work_block_size);
1497 + PATCH(sync_flush);
1499 + /* skip the first, the global context */
1500 + for (i = 1; i < srv->config_context->used; i++) {
1501 + data_config *dc = (data_config *)srv->config_context->data[i];
1502 + s = p->config_storage[i];
1504 + /* condition didn't match */
1505 + if (!config_check_cond(srv, con, dc)) continue;
1507 + /* merge config */
1508 + for (j = 0; j < dc->value->used; j++) {
1509 + data_unset *du = dc->value->data[j];
1511 + if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.output-buffer-size"))) {
1512 + PATCH(output_buffer_size);
1513 + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.mimetypes"))) {
1515 + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.compression-level"))) {
1516 + PATCH(compression_level);
1517 + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.mem-level"))) {
1519 + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.window-size"))) {
1520 + PATCH(window_size);
1521 + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.min-compress-size"))) {
1522 + PATCH(min_compress_size);
1523 + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.work-block-size"))) {
1524 + PATCH(work_block_size);
1525 + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.enabled"))) {
1527 + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.debug"))) {
1529 + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.bzip2"))) {
1531 + } else if (buffer_is_equal_string(du->key, CONST_STR_LEN("deflate.sync-flush"))) {
1532 + PATCH(sync_flush);
1541 +PHYSICALPATH_FUNC(mod_deflate_handle_response_start) {
1542 + plugin_data *p = p_d;
1543 + handler_ctx *hctx;
1545 + int accept_encoding = 0;
1547 + int srv_encodings = 0;
1548 + int matched_encodings = 0;
1549 + const char *dflt_gzip = "gzip";
1550 + const char *dflt_deflate = "deflate";
1551 + const char *dflt_bzip2 = "bzip2";
1552 + const char *compression_name = NULL;
1557 + /* disable compression for some http status types. */
1558 + switch(con->http_status) {
1564 + /* disable compression as we have no response entity */
1565 + return HANDLER_GO_ON;
1570 + mod_deflate_patch_connection(srv, con, p);
1572 + /* is compression allowed */
1573 + if(!p->conf.enabled) {
1574 + if(p->conf.debug) {
1575 + log_error_write(srv, __FILE__, __LINE__, "s", "compression disabled.");
1577 + return HANDLER_GO_ON;
1580 + /* the response might change according to Accept-Encoding */
1581 + if (NULL != (ds = (data_string *)array_get_element(con->response.headers, "Vary"))) {
1582 + /* append Accept-Encoding to Vary header */
1583 + if (NULL == strstr(ds->value->ptr, "Accept-Encoding")) {
1584 + buffer_append_string(ds->value, ",Accept-Encoding");
1587 + response_header_insert(srv, con, CONST_STR_LEN("Vary"),
1588 + CONST_STR_LEN("Accept-Encoding"));
1591 + /* Check if response has a Content-Encoding. */
1592 + if (NULL != (ds = (data_string *)array_get_element(con->response.headers, "Content-Encoding"))) {
1593 + return HANDLER_GO_ON;
1596 + /* Check Accept-Encoding for supported encoding. */
1597 + if (NULL == (ds = (data_string *)array_get_element(con->request.headers, "Accept-Encoding"))) {
1598 + return HANDLER_GO_ON;
1601 + /* get client side support encodings */
1602 + value = ds->value->ptr;
1604 + if (NULL != strstr(value, "gzip")) accept_encoding |= HTTP_ACCEPT_ENCODING_GZIP;
1605 + if (NULL != strstr(value, "deflate")) accept_encoding |= HTTP_ACCEPT_ENCODING_DEFLATE;
1607 + /* if (NULL != strstr(value, "compress")) accept_encoding |= HTTP_ACCEPT_ENCODING_COMPRESS; */
1609 + if(p->conf.bzip2) {
1610 + if (NULL != strstr(value, "bzip2")) accept_encoding |= HTTP_ACCEPT_ENCODING_BZIP2;
1613 + if (NULL != strstr(value, "identity")) accept_encoding |= HTTP_ACCEPT_ENCODING_IDENTITY;
1615 + /* get server side supported ones */
1617 + if(p->conf.bzip2) {
1618 + srv_encodings |= HTTP_ACCEPT_ENCODING_BZIP2;
1622 + srv_encodings |= HTTP_ACCEPT_ENCODING_GZIP;
1623 + srv_encodings |= HTTP_ACCEPT_ENCODING_DEFLATE;
1626 + /* find matching encodings */
1627 + matched_encodings = accept_encoding & srv_encodings;
1628 + if (!matched_encodings) {
1629 + return HANDLER_GO_ON;
1632 + /* check if size of response is below min-compress-size */
1633 + if(con->file_finished && con->request.http_method != HTTP_METHOD_HEAD) {
1634 + file_len = chunkqueue_length(con->write_queue);
1635 + if(file_len == 0) return HANDLER_GO_ON;
1639 + if(file_len > 0 && p->conf.min_compress_size > 0 && file_len < p->conf.min_compress_size) {
1640 + if(p->conf.debug) {
1641 + log_error_write(srv, __FILE__, __LINE__, "sd",
1642 + "Content-Length smaller then min_compress_size: file_len=", file_len);
1644 + return HANDLER_GO_ON;
1647 + /* Check mimetype in response header "Content-Type" */
1648 + if (NULL != (ds = (data_string *)array_get_element(con->response.headers, "Content-Type"))) {
1650 + if(p->conf.debug) {
1651 + log_error_write(srv, __FILE__, __LINE__, "sb",
1652 + "Content-Type:", ds->value);
1654 + for (m = 0; m < p->conf.mimetypes->used; m++) {
1655 + data_string *mimetype = (data_string *)p->conf.mimetypes->data[m];
1657 + if(p->conf.debug) {
1658 + log_error_write(srv, __FILE__, __LINE__, "sb",
1659 + "mime-type:", mimetype->value);
1661 + if (buffer_is_equal(mimetype->value, ds->value)) {
1662 + /* mimetype found */
1667 + if(!found && p->conf.mimetypes->used > 0) {
1668 + if(p->conf.debug) {
1669 + log_error_write(srv, __FILE__, __LINE__, "sb",
1670 + "No compression for mimetype:", ds->value);
1672 + return HANDLER_GO_ON;
1676 + if(p->conf.debug) {
1677 + log_error_write(srv, __FILE__, __LINE__, "s",
1678 + "enable compression.");
1680 + /* enable compression */
1681 + hctx = handler_ctx_init();
1682 + hctx->plugin_data = p;
1684 + /* select best matching encoding */
1685 + if (matched_encodings & HTTP_ACCEPT_ENCODING_BZIP2) {
1686 + hctx->compression_type = HTTP_ACCEPT_ENCODING_BZIP2;
1687 + compression_name = dflt_bzip2;
1688 + rc = stream_bzip2_init(srv, con, hctx);
1689 + } else if (matched_encodings & HTTP_ACCEPT_ENCODING_GZIP) {
1690 + hctx->compression_type = HTTP_ACCEPT_ENCODING_GZIP;
1691 + compression_name = dflt_gzip;
1692 + rc = stream_deflate_init(srv, con, hctx);
1693 + } else if (matched_encodings & HTTP_ACCEPT_ENCODING_DEFLATE) {
1694 + hctx->compression_type = HTTP_ACCEPT_ENCODING_DEFLATE;
1695 + compression_name = dflt_deflate;
1696 + rc = stream_deflate_init(srv, con, hctx);
1699 + log_error_write(srv, __FILE__, __LINE__, "s",
1700 + "Failed to initialize compression.");
1703 + handler_ctx_free(hctx);
1704 + return HANDLER_GO_ON;
1707 + /* setup output buffer. */
1708 + if(p->conf.sync_flush || p->conf.output_buffer_size == 0) {
1709 + buffer_prepare_copy(p->tmp_buf, 32 * 1024);
1710 + hctx->output = p->tmp_buf;
1712 + hctx->output = buffer_init();
1713 + buffer_prepare_copy(hctx->output, p->conf.output_buffer_size);
1715 + con->plugin_ctx[p->id] = hctx;
1717 + /* set Content-Encoding to show selected compression type. */
1718 + response_header_overwrite(srv, con, CONST_STR_LEN("Content-Encoding"), compression_name, strlen(compression_name));
1720 + /* if file finished and size less than work-block-size, then compress the content now. */
1721 + if(con->file_finished && (p->conf.work_block_size == 0 || file_len < (p->conf.work_block_size * 1024)) &&
1722 + con->request.http_method != HTTP_METHOD_HEAD) {
1723 + /* We don't have to use chunked encoding. */
1724 + con->response.transfer_encoding = 0;
1725 + con->parsed_response &= ~(HTTP_CONTENT_LENGTH);
1726 + /* Compress all response content. */
1727 + if(p->conf.debug) {
1728 + log_error_write(srv, __FILE__, __LINE__, "sd",
1729 + "Compress all content and use Content-Length header: uncompress len=", file_len);
1731 + return deflate_compress_response(srv, con, hctx, 1);
1733 + /* Remove Content-Length header. We don't know the length. */
1734 + con->parsed_response &= ~(HTTP_CONTENT_LENGTH);
1735 + if (con->request.http_version == HTTP_VERSION_1_1) {
1736 + /* Make sure to use chunked encoding. */
1737 + con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED;
1738 + /* Remove Content-Length header. We don't know the length. */
1739 + con->parsed_response &= ~(HTTP_CONTENT_LENGTH);
1741 + /* HTTP/1.0 doesn't support chunked transfer encoding. */
1742 + con->response.transfer_encoding = 0;
1743 + return deflate_compress_response(srv, con, hctx, 1);
1747 + return HANDLER_GO_ON;
1750 +JOBLIST_FUNC(mod_deflate_handle_response_filter) {
1751 + plugin_data *p = p_d;
1752 + handler_ctx *hctx = con->plugin_ctx[p->id];
1754 + if(hctx == NULL) return HANDLER_GO_ON;
1755 + if(!hctx->stream_open) return HANDLER_GO_ON;
1756 + if(con->request.http_method == HTTP_METHOD_HEAD) return HANDLER_GO_ON;
1758 + return deflate_compress_response(srv, con, hctx, 0);
1761 +handler_t mod_deflate_cleanup(server *srv, connection *con, void *p_d) {
1762 + plugin_data *p = p_d;
1763 + handler_ctx *hctx = con->plugin_ctx[p->id];
1765 + if(hctx == NULL) return HANDLER_GO_ON;
1767 + if(p->conf.debug && hctx->stream_open) {
1768 + log_error_write(srv, __FILE__, __LINE__, "sbsb",
1769 + "stream open at cleanup. uri=", con->uri.path_raw, ", query=", con->uri.query);
1772 + deflate_compress_cleanup(srv, con, hctx);
1774 + return HANDLER_GO_ON;
1777 +int mod_deflate_plugin_init(plugin *p) {
1778 + p->version = LIGHTTPD_VERSION_ID;
1779 + p->name = buffer_init_string("deflate");
1781 + p->init = mod_deflate_init;
1782 + p->cleanup = mod_deflate_free;
1783 + p->set_defaults = mod_deflate_setdefaults;
1784 + p->connection_reset = mod_deflate_cleanup;
1785 + p->handle_connection_close = mod_deflate_cleanup;
1786 + p->handle_response_start = mod_deflate_handle_response_start;
1787 + p->handle_response_filter = mod_deflate_handle_response_filter;
1793 diff -Naur lighttpd-1.4.10.orig/src/plugin.c lighttpd-1.4.10/src/plugin.c
1794 --- lighttpd-1.4.10.orig/src/plugin.c 2006-02-08 04:00:54.000000000 -0800
1795 +++ lighttpd-1.4.10/src/plugin.c 2006-02-16 22:25:16.514345356 -0800
1797 PLUGIN_FUNC_HANDLE_SIGHUP,
1798 PLUGIN_FUNC_HANDLE_SUBREQUEST,
1799 PLUGIN_FUNC_HANDLE_SUBREQUEST_START,
1800 + PLUGIN_FUNC_HANDLE_RESPONSE_START,
1801 + PLUGIN_FUNC_HANDLE_RESPONSE_FILTER,
1802 PLUGIN_FUNC_HANDLE_JOBLIST,
1803 PLUGIN_FUNC_HANDLE_DOCROOT,
1804 PLUGIN_FUNC_HANDLE_PHYSICAL,
1806 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_CONNECTION_CLOSE, handle_connection_close)
1807 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_SUBREQUEST, handle_subrequest)
1808 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_SUBREQUEST_START, handle_subrequest_start)
1809 +PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_RESPONSE_START, handle_response_start)
1810 +PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_RESPONSE_FILTER, handle_response_filter)
1811 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_JOBLIST, handle_joblist)
1812 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_DOCROOT, handle_docroot)
1813 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_PHYSICAL, handle_physical)
1815 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_SIGHUP, handle_sighup);
1816 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_SUBREQUEST, handle_subrequest);
1817 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_SUBREQUEST_START, handle_subrequest_start);
1818 + PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_RESPONSE_START, handle_response_start);
1819 + PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_RESPONSE_FILTER, handle_response_filter);
1820 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_JOBLIST, handle_joblist);
1821 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_DOCROOT, handle_docroot);
1822 PLUGIN_TO_SLOT(PLUGIN_FUNC_HANDLE_PHYSICAL, handle_physical);
1823 diff -Naur lighttpd-1.4.10.orig/src/plugin.h lighttpd-1.4.10/src/plugin.h
1824 --- lighttpd-1.4.10.orig/src/plugin.h 2005-08-15 02:28:56.000000000 -0700
1825 +++ lighttpd-1.4.10/src/plugin.h 2006-02-16 22:25:16.514345356 -0800
1829 handler_t (* handle_subrequest) (server *srv, connection *con, void *p_d); /* */
1830 + handler_t (* handle_response_start) (server *srv, connection *con, void *p_d); /* before response headers are written */
1831 + handler_t (* handle_response_filter) (server *srv, connection *con, void *p_d); /* response content filter */
1832 handler_t (* connection_reset) (server *srv, connection *con, void *p_d); /* */
1836 handler_t plugins_call_handle_uri_clean(server *srv, connection *con);
1837 handler_t plugins_call_handle_subrequest_start(server *srv, connection *con);
1838 handler_t plugins_call_handle_subrequest(server *srv, connection *con);
1839 +handler_t plugins_call_handle_response_start(server *srv, connection *con);
1840 +handler_t plugins_call_handle_response_filter(server *srv, connection *con);
1841 handler_t plugins_call_handle_request_done(server *srv, connection *con);
1842 handler_t plugins_call_handle_docroot(server *srv, connection *con);
1843 handler_t plugins_call_handle_physical(server *srv, connection *con);
1844 diff -Naur lighttpd-1.4.10.orig/src/response.c lighttpd-1.4.10/src/response.c
1845 --- lighttpd-1.4.10.orig/src/response.c 2006-02-08 04:01:01.000000000 -0800
1846 +++ lighttpd-1.4.10/src/response.c 2006-02-16 22:25:16.515345187 -0800
1849 int have_server = 0;
1851 - b = chunkqueue_get_prepend_buffer(con->write_queue);
1852 + b = chunkqueue_get_prepend_buffer(con->output_queue);
1854 if (con->request.http_version == HTTP_VERSION_1_1) {
1855 BUFFER_COPY_STRING_CONST(b, "HTTP/1.1 ");
1856 diff -Naur lighttpd-1.4.10.orig/src/server.c lighttpd-1.4.10/src/server.c
1857 --- lighttpd-1.4.10.orig/src/server.c 2006-02-01 03:50:02.000000000 -0800
1858 +++ lighttpd-1.4.10/src/server.c 2006-02-16 22:25:16.516345019 -0800
1860 srv->joblist = calloc(1, sizeof(*srv->joblist));
1861 assert(srv->joblist);
1863 + srv->joblist_prev = calloc(1, sizeof(*srv->joblist));
1864 + assert(srv->joblist_prev);
1866 srv->fdwaitqueue = calloc(1, sizeof(*srv->fdwaitqueue));
1867 assert(srv->fdwaitqueue);
1872 joblist_free(srv, srv->joblist);
1873 + joblist_free(srv, srv->joblist_prev);
1874 fdwaitqueue_free(srv, srv->fdwaitqueue);
1876 if (srv->stat_cache) {
1877 @@ -1016,6 +1020,7 @@
1879 while (!srv_shutdown) {
1885 @@ -1243,7 +1248,12 @@
1889 - if ((n = fdevent_poll(srv->ev, 1000)) > 0) {
1890 + if(srv->joblist->used > 0) {
1895 + if ((n = fdevent_poll(srv->ev, timeout)) > 0) {
1896 /* n is the number of events */
1899 @@ -1291,25 +1301,29 @@
1903 - for (ndx = 0; ndx < srv->joblist->used; ndx++) {
1904 - connection *con = srv->joblist->ptr[ndx];
1907 - connection_state_machine(srv, con);
1909 - switch(r = plugins_call_handle_joblist(srv, con)) {
1910 - case HANDLER_FINISHED:
1911 - case HANDLER_GO_ON:
1914 - log_error_write(srv, __FILE__, __LINE__, "d", r);
1916 + if(srv->joblist->used > 0) {
1917 + connections *joblist = srv->joblist;
1918 + /* switch joblist queues. */
1919 + srv->joblist = srv->joblist_prev;
1920 + srv->joblist_prev = joblist;
1921 + for (ndx = 0; ndx < joblist->used; ndx++) {
1922 + connection *con = joblist->ptr[ndx];
1925 + con->in_joblist = 0;
1926 + connection_state_machine(srv, con);
1928 + switch(r = plugins_call_handle_joblist(srv, con)) {
1929 + case HANDLER_FINISHED:
1930 + case HANDLER_GO_ON:
1933 + log_error_write(srv, __FILE__, __LINE__, "d", r);
1938 - con->in_joblist = 0;
1939 + joblist->used = 0;
1942 - srv->joblist->used = 0;
1945 if (srv->srvconf.pid_file->used &&