LibCompress: Turn the DEFLATE implementation into a stream.
Previously, the implementation would produce one Vector<u8> containing the whole decompressed data. That can be a lot, and can even exhaust memory.

With these changes, it is still necessary to keep the whole input data in memory in one piece (I am working on that next), but the output can be read block by block. (That is not optimal either, because blocks can be arbitrarily large, but it is good enough for now.)
Parent: 30abadcff9
Commit: 8bbb7e25e6
Notes:
sideshowbarker
2024-07-19 03:24:00 +09:00
Author: https://github.com/asynts
Commit: 8bbb7e25e6
Pull-request: https://github.com/SerenityOS/serenity/pull/3220
Reviewed-by: https://github.com/awesomekling
7 changed files with 187 additions and 74 deletions
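To make the commit message concrete, here is a minimal sketch of what block-by-block consumption of the new stream looks like. The type name Compress::DeflateStream, its constructor, and the eof() check below are illustrative assumptions about the LibCompress API, not code taken from this commit:

    #include <AK/Span.h>
    #include <LibCompress/Deflate.h> // hypothetical header location

    void decompress_in_chunks(ReadonlyBytes compressed_input)
    {
        Compress::DeflateStream stream { compressed_input }; // hypothetical name/API

        u8 chunk[4096];
        while (!stream.eof()) {
            // Each read yields the next piece of decompressed output, so the
            // full result never has to be materialized as a single Vector<u8>.
            const auto nread = stream.read({ chunk, sizeof(chunk) });
            if (nread == 0)
                break;
            // ... hand chunk[0 .. nread) to the consumer here ...
        }
    }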
AK/Stream.h (30 lines changed)
@@ -309,10 +309,28 @@ public:
         }
 
         m_read_offset += nread;
 
+        try_discard_chunks();
+
         return nread;
     }
 
+    size_t read(Bytes bytes, size_t offset)
+    {
+        const auto backup = this->roffset();
+
+        bool do_discard_chunks = false;
+        exchange(m_do_discard_chunks, do_discard_chunks);
+
+        rseek(offset);
+        const auto count = read(bytes);
+        rseek(backup);
+
+        exchange(m_do_discard_chunks, do_discard_chunks);
+
+        return count;
+    }
+
     bool read_or_error(Bytes bytes) override
     {
         if (m_write_offset - m_read_offset < bytes.size()) {
@@ -344,20 +362,24 @@ public:
         return true;
     }
 
-    void seek(size_t offset)
+    size_t roffset() const { return m_read_offset; }
+    size_t woffset() const { return m_write_offset; }
+
+    void rseek(size_t offset)
     {
+        ASSERT(offset >= m_base_offset);
         ASSERT(offset <= m_write_offset);
         m_read_offset = offset;
     }
 
-    size_t offset() const { return m_read_offset; }
-
     size_t remaining() const { return m_write_offset - m_read_offset; }
 
 private:
+    void try_discard_chunks()
+    {
+        if (!m_do_discard_chunks)
+            return;
+
+        while (m_read_offset - m_base_offset >= history_size + chunk_size) {
+            m_chunks.take_first();
+            m_base_offset += chunk_size;
@@ -368,7 +390,9 @@ private:
     size_t m_write_offset { 0 };
     size_t m_read_offset { 0 };
+    size_t m_base_offset { 0 };
+    bool m_do_discard_chunks { false };
 };
 
 }
 
 using AK::DuplexMemoryStream;
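As context for the diff above (an inference, not something the commit states): DEFLATE back-references can reach up to 32 KiB behind the write cursor, which is presumably why try_discard_chunks() always retains at least history_size bytes behind the read offset, and why read(Bytes, size_t offset) temporarily disables chunk discarding and restores the read offset afterwards. A hypothetical sketch of a decompressor resolving a back-reference <distance, length> against such a buffer:

    #include <AK/Stream.h>

    // Illustration only; not code from this commit. Assumes `output` is the
    // DuplexMemoryStream the decompressor writes its decompressed bytes into.
    void copy_back_reference(DuplexMemoryStream& output, size_t distance, size_t length)
    {
        for (size_t i = 0; i < length; ++i) {
            u8 byte;
            // Read a single already-emitted byte from `distance` bytes behind
            // the write cursor via the new offset-taking read(). Copying one
            // byte at a time is deliberate: DEFLATE allows length > distance,
            // so the source range may overlap bytes this loop just wrote.
            output.read({ &byte, sizeof(byte) }, output.woffset() - distance);
            output.write({ &byte, sizeof(byte) });
        }
    }

Because try_discard_chunks() only frees chunks that lie entirely behind the history_size window, reads like the one above stay within retained memory.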
|
Loading…
Add table
Add a link
Reference in a new issue