Skip to content

Commit

Permalink
Correct ensure_buffered_data
Browse files Browse the repository at this point in the history
The `read_blocks!` function does not necessarily fill/overwrite blocks up to `lastindex(stream.blocks)`; we need to ensure `stream.block_index` does not increment past the index of the last loaded block.
  • Loading branch information
CiaranOMara committed Apr 28, 2023
1 parent 5e7a38a commit 886d474
Show file tree
Hide file tree
Showing 2 changed files with 16 additions and 5 deletions.
19 changes: 15 additions & 4 deletions src/bgzfstream.jl
Original file line number Diff line number Diff line change
Expand Up @@ -84,13 +84,20 @@ mutable struct BGZFStream{T<:IO} <: IO
# current block index
block_index::Int

# index of the last block loaded
last_block_index::Int

# whether stream is open
isopen::Bool

# callback function called when closing the stream
onclose::Function
end

# Backwards-compatible outer constructor for callers written before the
# `last_block_index` field was added; the new field starts at 0 (presumably
# meaning "no block loaded yet" — consistent with the field's doc comment).
BGZFStream(io, mode, blocks, block_index, isopen, onclose) =
    BGZFStream(io, mode, blocks, block_index, 0, isopen, onclose)

# BGZF blocks are no larger than 64 KiB before and after compression.
const BGZF_MAX_BLOCK_SIZE = UInt(64 * 1024)

Expand Down Expand Up @@ -315,16 +322,19 @@ end
@inline function ensure_buffered_data(stream)
#@assert stream.mode == READ_MODE
@label doit
while stream.block_index ≤ lastindex(stream.blocks)
while stream.block_index < stream.last_block_index
@inbounds block = stream.blocks[stream.block_index]
if is_eof_block(block.compressed_block) # Note: `read_blocks!` does not necessarily fill/overwrite blocks till `lastindex(stream.blocks)`, we need to stop incrementing `stream.block_index` when an eof block is encountered.
break
end
if block.position ≤ block.size
return stream.block_index
end
stream.block_index += 1
end
if stream.block_index == stream.last_block_index
@inbounds block = stream.blocks[stream.block_index]
if block.position ≤ block.size
return stream.block_index
end
end
if !eof(stream.io)
read_blocks!(stream)
@goto doit
Expand Down Expand Up @@ -364,6 +374,7 @@ function read_blocks!(stream)
end
while n_blocks < length(stream.blocks) && !eof(stream.io)
block = stream.blocks[n_blocks += 1]
stream.last_block_index = n_blocks
if has_position
block.block_offset = position(stream.io)
end
Expand Down
2 changes: 1 addition & 1 deletion test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ end
@test stream.blocks[1].size == BGZFStreams.BGZF_SAFE_BLOCK_SIZE

# Generate n blocks of data.
data = rand(0x00:0xf0, (n*BGZFStreams.BGZF_SAFE_BLOCK_SIZE) )
data = rand(0x00:0xf0, ((n+1)*BGZFStreams.BGZF_SAFE_BLOCK_SIZE) )

write_offsets = BGZFStreams.VirtualOffset[]

Expand Down

0 comments on commit 886d474

Please sign in to comment.