Skip to content

Commit

Permalink
Correct ensure_buffered_data
Browse files Browse the repository at this point in the history
The `read_blocks!` function does not necessarily fill/overwrite blocks till `lastindex(stream.blocks)`, we need to ensure `stream.block_index` does not increment past the index of the last loaded block.
  • Loading branch information
CiaranOMara committed Apr 20, 2023
1 parent 5e7a38a commit 8a5369c
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 6 deletions.
17 changes: 12 additions & 5 deletions src/bgzfstream.jl
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,9 @@ mutable struct BGZFStream{T<:IO} <: IO
# current block index
block_index::Int

# index of the last block loaded
last_block_index::Int

# whether stream is open
isopen::Bool

Expand Down Expand Up @@ -130,7 +133,7 @@ function BGZFStream(io::IO, mode::AbstractString="r")
# Write mode is not (yet?) multi-threaded.
blocks = [Block(mode′)]
end
return BGZFStream(io, mode′, blocks, 1, true, io -> close(io))
return BGZFStream(io, mode′, blocks, 1, 0, true, io -> close(io))
end

function BGZFStream(filename::AbstractString, mode::AbstractString = "r")
Expand Down Expand Up @@ -315,16 +318,19 @@ end
@inline function ensure_buffered_data(stream)
#@assert stream.mode == READ_MODE
@label doit
    while stream.block_index ≤ lastindex(stream.blocks)
while stream.block_index < stream.last_block_index
@inbounds block = stream.blocks[stream.block_index]
if is_eof_block(block.compressed_block) # Note: `read_blocks!` does not necessarily fill/overwrite blocks till `lastindex(stream.blocks)`, we need to stop incrementing `stream.block_index` when an eof block is encountered.
break
end
        if block.position ≤ block.size
return stream.block_index
end
stream.block_index += 1
end
if stream.block_index == stream.last_block_index
@inbounds block = stream.blocks[stream.block_index]
        if block.position ≤ block.size
return stream.block_index
end
end
if !eof(stream.io)
read_blocks!(stream)
@goto doit
Expand Down Expand Up @@ -364,6 +370,7 @@ function read_blocks!(stream)
end
while n_blocks < length(stream.blocks) && !eof(stream.io)
block = stream.blocks[n_blocks += 1]
stream.last_block_index = n_blocks
if has_position
block.block_offset = position(stream.io)
end
Expand Down
2 changes: 1 addition & 1 deletion test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ end
@test stream.blocks[1].size == BGZFStreams.BGZF_SAFE_BLOCK_SIZE

# Generate n blocks of data.
data = rand(0x00:0xf0, (n*BGZFStreams.BGZF_SAFE_BLOCK_SIZE) )
data = rand(0x00:0xf0, ((n+1)*BGZFStreams.BGZF_SAFE_BLOCK_SIZE) )

write_offsets = BGZFStreams.VirtualOffset[]

Expand Down

0 comments on commit 8a5369c

Please sign in to comment.