author    Fabio Alessandrelli <fabio.alessandrelli@gmail.com>  2022-11-20 22:17:40 +0100
committer Fabio Alessandrelli <fabio.alessandrelli@gmail.com>  2022-11-20 22:41:14 +0100
commit    145f07c0375eea8a62ee2df9c06a01083800e6a8 (patch)
tree      de9c55f8400e938876ce3672a00f3332bdf34b98 /scene
parent    98da707df5a40d37e85070cec6babfee4fae5765 (diff)
[Net] Fix HTTPRequest gzip with high compression ratio.
Decompress each body chunk over multiple iterations. This causes more reallocations, but it ensures decompression will not fail regardless of the compression ratio.
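The fix generalizes beyond Godot's StreamPeerGZIP: instead of sizing a single output buffer for the whole decompressed chunk, feed the compressed bytes in and drain a bounded scratch buffer repeatedly until the decompressor has nothing left. Below is a minimal standalone sketch of that pattern written against raw zlib; the function name inflate_chunk and the 16 KiB scratch size are illustrative assumptions, not part of the commit.

// Sketch only: decompress one gzip body chunk over multiple iterations,
// so no single output allocation has to be "big enough" for any ratio.
#include <zlib.h>
#include <cstddef>
#include <vector>

static bool inflate_chunk(z_stream &zs, const unsigned char *in, size_t in_len,
		std::vector<unsigned char> &out) {
	unsigned char buf[16384]; // Fixed-size scratch buffer, drained each pass.
	zs.next_in = const_cast<unsigned char *>(in);
	zs.avail_in = static_cast<uInt>(in_len);
	do {
		zs.next_out = buf;
		zs.avail_out = sizeof(buf);
		int ret = inflate(&zs, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END && ret != Z_BUF_ERROR) {
			return false; // Corrupt or truncated stream.
		}
		// Append whatever this iteration produced; `out` may reallocate.
		out.insert(out.end(), buf, buf + (sizeof(buf) - zs.avail_out));
		if (ret == Z_STREAM_END) {
			return true; // End of the gzip stream.
		}
		// Keep going while input remains or the scratch buffer filled up
		// (a full buffer means more output may still be pending).
	} while (zs.avail_in > 0 || zs.avail_out == 0);
	return true;
}

int main() {
	z_stream zs = {};
	// 16 + MAX_WBITS tells zlib to expect a gzip header, matching
	// Content-Encoding: gzip; plain MAX_WBITS would expect the zlib
	// wrapper used by Content-Encoding: deflate.
	if (inflateInit2(&zs, 16 + MAX_WBITS) != Z_OK) {
		return 1;
	}
	std::vector<unsigned char> body;
	// ... feed each network chunk through inflate_chunk(zs, ptr, len, body) ...
	inflateEnd(&zs);
	return 0;
}

The trade-off named in the message is visible here: out grows by repeated appends (more reallocations), but decompression can no longer fail just because one chunk expands more than a pre-sized buffer expected.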
Diffstat (limited to 'scene')
-rw-r--r--  scene/main/http_request.cpp | 49 ++++++++++++++++++++++++++++++++++---------------
1 file changed, 34 insertions, 15 deletions
diff --git a/scene/main/http_request.cpp b/scene/main/http_request.cpp
index 2c395ec07d..62f362553f 100644
--- a/scene/main/http_request.cpp
+++ b/scene/main/http_request.cpp
@@ -276,10 +276,10 @@ bool HTTPRequest::_handle_response(bool *ret_value) {
 	}
 	if (content_encoding == "gzip") {
 		decompressor.instantiate();
-		decompressor->start_decompression(false, get_download_chunk_size() * 2);
+		decompressor->start_decompression(false, get_download_chunk_size());
 	} else if (content_encoding == "deflate") {
 		decompressor.instantiate();
-		decompressor->start_decompression(true, get_download_chunk_size() * 2);
+		decompressor->start_decompression(true, get_download_chunk_size());
 	}
 
 	return false;
@@ -390,19 +390,38 @@ bool HTTPRequest::_update_connection() {
 				return false;
 			}
 
-			PackedByteArray chunk = client->read_response_body_chunk();
-			downloaded.add(chunk.size());
-
-			// Decompress chunk if needed.
-			if (decompressor.is_valid()) {
-				Error err = decompressor->put_data(chunk.ptr(), chunk.size());
-				if (err == OK) {
-					chunk.resize(decompressor->get_available_bytes());
-					err = decompressor->get_data(chunk.ptrw(), chunk.size());
-				}
-				if (err != OK) {
-					_defer_done(RESULT_BODY_DECOMPRESS_FAILED, response_code, response_headers, PackedByteArray());
-					return true;
+			PackedByteArray chunk;
+			if (decompressor.is_null()) {
+				// Chunk can be read directly.
+				chunk = client->read_response_body_chunk();
+				downloaded.add(chunk.size());
+			} else {
+				// Chunk is the result of decompression.
+				PackedByteArray compressed = client->read_response_body_chunk();
+				downloaded.add(compressed.size());
+
+				int pos = 0;
+				int left = compressed.size();
+				while (left) {
+					int w = 0;
+					Error err = decompressor->put_partial_data(compressed.ptr() + pos, left, w);
+					if (err == OK) {
+						PackedByteArray dc;
+						dc.resize(decompressor->get_available_bytes());
+						err = decompressor->get_data(dc.ptrw(), dc.size());
+						chunk.append_array(dc);
+					}
+					if (err != OK) {
+						_defer_done(RESULT_BODY_DECOMPRESS_FAILED, response_code, response_headers, PackedByteArray());
+						return true;
+					}
+					// We need this check here because a "zip bomb" could result in a chunk of a few kilobytes decompressing into gigabytes of data.
+					if (body_size_limit >= 0 && final_body_size.get() + chunk.size() > body_size_limit) {
+						_defer_done(RESULT_BODY_SIZE_LIMIT_EXCEEDED, response_code, response_headers, PackedByteArray());
+						return true;
+					}
+					pos += w;
+					left -= w;
 				}
 			}
 			final_body_size.add(chunk.size());
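The size check inside the while loop is what defuses the "zip bomb" the comment describes: because the limit is tested after every drain iteration rather than once per network chunk, memory use stays bounded even when a few kilobytes of input expand into gigabytes. In the zlib sketch earlier, an equivalent guard would sit immediately after the output append; body_size_limit here is a hypothetical cap mirroring the engine property of the same name.

// Inside inflate_chunk()'s do/while, right after appending to `out`:
if (body_size_limit >= 0 && static_cast<long long>(out.size()) > body_size_limit) {
	return false; // Report as a size-limit failure, not a decompression error.
}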