From 145f07c0375eea8a62ee2df9c06a01083800e6a8 Mon Sep 17 00:00:00 2001
From: Fabio Alessandrelli
Date: Sun, 20 Nov 2022 22:17:40 +0100
Subject: [Net] Fix HTTPRequest gzip with high compression ratio.

Decompress each body chunk over multiple iterations. This causes more
reallocations, but it ensures decompression will not fail, no matter the
compression ratio.
---
 scene/main/http_request.cpp | 49 ++++++++++++++++++++++++++++++++++----------------
 1 file changed, 34 insertions(+), 15 deletions(-)

diff --git a/scene/main/http_request.cpp b/scene/main/http_request.cpp
index 2c395ec07d..62f362553f 100644
--- a/scene/main/http_request.cpp
+++ b/scene/main/http_request.cpp
@@ -276,10 +276,10 @@ bool HTTPRequest::_handle_response(bool *ret_value) {
 	}
 	if (content_encoding == "gzip") {
 		decompressor.instantiate();
-		decompressor->start_decompression(false, get_download_chunk_size() * 2);
+		decompressor->start_decompression(false, get_download_chunk_size());
 	} else if (content_encoding == "deflate") {
 		decompressor.instantiate();
-		decompressor->start_decompression(true, get_download_chunk_size() * 2);
+		decompressor->start_decompression(true, get_download_chunk_size());
 	}
 
 	return false;
@@ -390,19 +390,38 @@ bool HTTPRequest::_update_connection() {
 				return false;
 			}
 
-			PackedByteArray chunk = client->read_response_body_chunk();
-			downloaded.add(chunk.size());
-
-			// Decompress chunk if needed.
-			if (decompressor.is_valid()) {
-				Error err = decompressor->put_data(chunk.ptr(), chunk.size());
-				if (err == OK) {
-					chunk.resize(decompressor->get_available_bytes());
-					err = decompressor->get_data(chunk.ptrw(), chunk.size());
-				}
-				if (err != OK) {
-					_defer_done(RESULT_BODY_DECOMPRESS_FAILED, response_code, response_headers, PackedByteArray());
-					return true;
+			PackedByteArray chunk;
+			if (decompressor.is_null()) {
+				// Chunk can be read directly.
+				chunk = client->read_response_body_chunk();
+				downloaded.add(chunk.size());
+			} else {
+				// Chunk is the result of decompression.
+				PackedByteArray compressed = client->read_response_body_chunk();
+				downloaded.add(compressed.size());
+
+				int pos = 0;
+				int left = compressed.size();
+				while (left) {
+					int w = 0;
+					Error err = decompressor->put_partial_data(compressed.ptr() + pos, left, w);
+					if (err == OK) {
+						PackedByteArray dc;
+						dc.resize(decompressor->get_available_bytes());
+						err = decompressor->get_data(dc.ptrw(), dc.size());
+						chunk.append_array(dc);
+					}
+					if (err != OK) {
+						_defer_done(RESULT_BODY_DECOMPRESS_FAILED, response_code, response_headers, PackedByteArray());
+						return true;
+					}
+					// We need this check here because a "zip bomb" could result in a chunk of a few kilobytes decompressing into gigabytes of data.
+					if (body_size_limit >= 0 && final_body_size.get() + chunk.size() > body_size_limit) {
+						_defer_done(RESULT_BODY_SIZE_LIMIT_EXCEEDED, response_code, response_headers, PackedByteArray());
+						return true;
+					}
+					pos += w;
+					left -= w;
 				}
 			}
 			final_body_size.add(chunk.size());
-- 
cgit v1.2.3
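
The key to the fix is that put_partial_data() reports back how many input bytes were actually consumed (w), so the loop can resubmit the unconsumed remainder until the whole compressed chunk is drained, rather than requiring a single put_data() call to buffer the entire decompressed result. Below is a minimal standalone sketch of the same bounded-iteration pattern, written against plain zlib rather than Godot's StreamPeerGZIP; the helper name decompress_chunk, the 4 KiB step size, and the 64 KiB output budget are illustrative assumptions, not part of the patch.

// Sketch of the patch's bounded-iteration decompression, using raw zlib.
// Build with: g++ sketch.cpp -lz
#include <zlib.h>

#include <cstdint>
#include <cstdio>
#include <vector>

// Inflate one compressed body chunk into `out`, draining the decompressor in
// fixed-size steps so no single call has to fit the whole result. Fails if
// the output would exceed `out_limit` (the "zip bomb" guard from the patch).
static bool decompress_chunk(z_stream &strm, const uint8_t *in, size_t in_len,
		std::vector<uint8_t> &out, size_t out_limit) {
	uint8_t buf[4096]; // Small fixed step, like the while (left) loop in the patch.
	strm.next_in = const_cast<Bytef *>(in);
	strm.avail_in = static_cast<uInt>(in_len);
	do {
		strm.next_out = buf;
		strm.avail_out = sizeof(buf);
		int ret = inflate(&strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END && ret != Z_BUF_ERROR) {
			return false; // Corrupt stream; analogous to RESULT_BODY_DECOMPRESS_FAILED.
		}
		size_t produced = sizeof(buf) - strm.avail_out;
		if (out.size() + produced > out_limit) {
			return false; // Analogous to RESULT_BODY_SIZE_LIMIT_EXCEEDED.
		}
		out.insert(out.end(), buf, buf + produced);
		if (ret == Z_STREAM_END) {
			break; // End of the gzip stream.
		}
		if (ret == Z_BUF_ERROR && produced == 0) {
			break; // No forward progress possible; needs more input.
		}
	} while (strm.avail_in > 0 || strm.avail_out == 0);
	return true;
}

int main() {
	z_stream strm{};
	// 16 + MAX_WBITS tells zlib to expect a gzip wrapper (Content-Encoding:
	// gzip); plain MAX_WBITS would correspond to deflate.
	if (inflateInit2(&strm, 16 + MAX_WBITS) != Z_OK) {
		return 1;
	}
	// A gzip stream of "hello\n", standing in for one network body chunk.
	static const uint8_t gz[] = {
		0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
		0xcb, 0x48, 0xcd, 0xc9, 0xc9, 0xe7, 0x02, 0x00,
		0x20, 0x30, 0x3a, 0x36, 0x06, 0x00, 0x00, 0x00
	};
	std::vector<uint8_t> out;
	bool ok = decompress_chunk(strm, gz, sizeof(gz), out, 64 * 1024); // Assumed 64 KiB budget.
	inflateEnd(&strm);
	if (!ok) {
		return 1;
	}
	fwrite(out.data(), 1, out.size(), stdout); // Prints the decompressed body ("hello").
	return 0;
}

Read together with the first hunk, this also appears to be why the `* 2` headroom passed to start_decompression() can be dropped: once decompression proceeds in partial steps, correctness no longer depends on the internal buffer being large enough to hold a whole decompressed chunk at once.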