SOURCES: elinks-chunked.patch (NEW) - gzip encoded chunked data wa...

witekfl witekfl at pld-linux.org
Thu Mar 13 15:29:04 CET 2008


Author: witekfl                      Date: Thu Mar 13 14:29:04 2008 GMT
Module: SOURCES                       Tag: HEAD
---- Log message:
- gzip-encoded chunked data was not always properly decoded.
  Files bigger than 65536 bytes might be garbage. This patch
  fixes this issue.

---- Files affected:
SOURCES:
   elinks-chunked.patch (NONE -> 1.1)  (NEW)

---- Diffs:

================================================================
Index: SOURCES/elinks-chunked.patch
diff -u /dev/null SOURCES/elinks-chunked.patch:1.1
--- /dev/null	Thu Mar 13 15:29:04 2008
+++ SOURCES/elinks-chunked.patch	Thu Mar 13 15:28:59 2008
@@ -0,0 +1,139 @@
+diff --git a/src/protocol/http/http.c b/src/protocol/http/http.c
+index 1264f6f..6de3078 100644
+--- a/src/protocol/http/http.c
++++ b/src/protocol/http/http.c
+@@ -983,29 +983,23 @@ decompress_data(struct connection *conn,
+ 		int *new_len)
+ {
+ 	struct http_connection_info *http = conn->info;
+-	/* to_read is number of bytes to be read from the decoder. It is 65536
+-	 * (then we are just emptying the decoder buffer as we finished the walk
+-	 * through the incoming stream already) or PIPE_BUF / 2 (when we are
+-	 * still walking through the stream - then we write PIPE_BUF / 2 to the
+-	 * pipe and read it back to the decoder ASAP; the point is that we can't
+-	 * write more than PIPE_BUF to the pipe at once, but we also have to
+-	 * never let read_encoded() (gzread(), in fact) to empty the pipe - that
+-	 * causes further malfunction of zlib :[ ... so we will make sure that
+-	 * we will always have at least PIPE_BUF / 2 + 1 in the pipe (returning
+-	 * early otherwise)). */
+-	int to_read = PIPE_BUF / 2, did_read = 0;
++	enum { NORMAL, FINISHING } state = NORMAL;
++	int did_read = 0;
+ 	int *length_of_block;
+ 	unsigned char *output = NULL;
+ 
+-	length_of_block = (http->length == LEN_CHUNKED ? &http->chunk_remaining
+-						       : &http->length);
+-
+ #define BIG_READ 65536
+-	if (!*length_of_block) {
+-		/* Going to finish this decoding bussiness. */
+-		/* Some nicely big value - empty encoded output queue by reading
+-		 * big chunks from it. */
+-		to_read = BIG_READ;
++
++	if (http->length == LEN_CHUNKED) {
++		if (http->chunk_remaining == CHUNK_ZERO_SIZE)
++			state = FINISHING;
++		length_of_block = &http->chunk_remaining;
++	} else {
++		length_of_block = &http->length;
++		if (!*length_of_block) {
++			/* Going to finish this decoding bussiness. */
++			state = FINISHING;
++		}
+ 	}
+ 
+ 	if (conn->content_encoding == ENCODING_NONE) {
+@@ -1024,14 +1018,13 @@ #define BIG_READ 65536
+ 	}
+ 
+ 	do {
+-		int init = 0;
++		unsigned char *tmp;
+ 
+-		if (to_read == PIPE_BUF / 2) {
++		if (state == NORMAL) {
+ 			/* ... we aren't finishing yet. */
+-			int written = safe_write(conn->stream_pipes[1], data,
+-						 len > to_read ? to_read : len);
++			int written = safe_write(conn->stream_pipes[1], data, len);
+ 
+-			if (written > 0) {
++			if (written >= 0) {
+ 				data += written;
+ 				len -= written;
+ 
+@@ -1042,7 +1035,7 @@ #define BIG_READ 65536
+ 				 * non-keep-alive and chunked */
+ 				if (!http->length) {
+ 					/* That's all, folks - let's finish this. */
+-					to_read = BIG_READ;
++					state = FINISHING;
+ 				} else if (!len) {
+ 					/* We've done for this round (but not done
+ 					 * completely). Thus we will get out with
+@@ -1061,28 +1054,26 @@ #define BIG_READ 65536
+ 			conn->stream = open_encoded(conn->stream_pipes[0],
+ 					conn->content_encoding);
+ 			if (!conn->stream) return NULL;
+-			/* On "startup" pipe is treated with care, but if everything
+-			 * was already written to the pipe, caution isn't necessary */
+-			else if (to_read != BIG_READ) init = 1;
+-		} else init = 0;
++		}
+ 
+-		output = (unsigned char *) mem_realloc(output, *new_len + to_read);
+-		if (!output) break;
++		tmp = mem_realloc(output, *new_len + BIG_READ);
++		if (!tmp) break;
++		output = tmp;
++
++		did_read = read_encoded(conn->stream, output + *new_len, BIG_READ);
+ 
+-		did_read = read_encoded(conn->stream, output + *new_len,
+-					init ? PIPE_BUF / 32 : to_read); /* on init don't read too much */
+ 		if (did_read > 0) *new_len += did_read;
+-		else if (did_read == -1) {
+-			mem_free_set(&output, NULL);
+-			*new_len = 0;
+-			break; /* Loop prevention (bug 517), is this correct ? --Zas */
++		else {
++			if (did_read < 0) state = FINISHING;
++			break;
+ 		}
+-	} while (len || did_read == BIG_READ);
++	} while (len || (did_read == BIG_READ));
+ 
+-	shutdown_connection_stream(conn);
++	if (state == FINISHING) shutdown_connection_stream(conn);
+ 	return output;
+ }
+ 
++
+ static int
+ is_line_in_buffer(struct read_buffer *rb)
+ {
+@@ -1206,11 +1197,8 @@ read_chunked_http_data(struct connection
+ 		} else {
+ 			unsigned char *data;
+ 			int data_len;
+-			int len;
+ 			int zero = (http->chunk_remaining == CHUNK_ZERO_SIZE);
+-
+-			if (zero) http->chunk_remaining = 0;
+-			len = http->chunk_remaining;
++			int len = zero ? 0 : http->chunk_remaining;
+ 
+ 			/* Maybe everything necessary didn't come yet.. */
+ 			int_upper_bound(&len, rb->length);
+@@ -1850,8 +1838,7 @@ #endif
+ 		conn->cached->encoding_info = stracpy(get_encoding_name(conn->content_encoding));
+ 	}
+ 
+-	if (http->length == -1
+-	    || (PRE_HTTP_1_1(http->recv_version) && http->close))
++	if (http->length == -1 || http->close)
+ 		socket->state = SOCKET_END_ONCLOSE;
+ 
+ 	read_http_data(socket, rb);
================================================================


More information about the pld-cvs-commit mailing list