Set default cache type to "readahead" (#678)
See analysis by @sneha5gsm in #677
mrocklin committed Dec 31, 2022
1 parent f57f9d8 commit 804057f
Showing 2 changed files with 6 additions and 6 deletions.
8 changes: 4 additions & 4 deletions s3fs/core.py
@@ -205,10 +205,10 @@ class S3FileSystem(AsyncFileSystem):
     default_fill_cache : Bool (True)
         Whether to use cache filling with open by default. Refer to
         ``S3File.open``.
-    default_cache_type : string ('bytes')
+    default_cache_type : string ("readahead")
         If given, the default cache_type value used for ``open()``. Set to "none"
         if no caching is desired. See fsspec's documentation for other available
-        cache_type values. Default cache_type is 'bytes'.
+        cache_type values. Default cache_type is "readahead".
     version_aware : bool (False)
         Whether to support bucket versioning. If enable this will require the
         user to have the necessary IAM permissions for dealing with versioned
@@ -265,7 +265,7 @@ def __init__(
         requester_pays=False,
         default_block_size=None,
         default_fill_cache=True,
-        default_cache_type="bytes",
+        default_cache_type="readahead",
         version_aware=False,
         config_kwargs=None,
         s3_additional_kwargs=None,
@@ -1972,7 +1972,7 @@ def __init__(
         fill_cache=True,
         s3_additional_kwargs=None,
         autocommit=True,
-        cache_type="bytes",
+        cache_type="readahead",
         requester_pays=False,
         cache_options=None,
     ):
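For context, a minimal sketch of what the new default means for callers; the anonymous connection and bucket/key names below are hypothetical illustrations, not part of this commit:

import s3fs

# After this change, a plain S3FileSystem uses "readahead" caching for open().
fs = s3fs.S3FileSystem(anon=True)

# The previous default is one keyword away; "none" disables caching
# entirely, as the docstring above notes.
fs_bytes = s3fs.S3FileSystem(anon=True, default_cache_type="bytes")

with fs.open("some-bucket/big-file.bin", "rb") as f:  # hypothetical path
    chunk = f.read(2**20)  # sequential reads are served from the readahead cache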
4 changes: 2 additions & 2 deletions s3fs/tests/test_s3fs.py
@@ -183,7 +183,7 @@ def test_simple(s3):
     assert out == data


-@pytest.mark.parametrize("default_cache_type", ["none", "bytes", "mmap"])
+@pytest.mark.parametrize("default_cache_type", ["none", "bytes", "mmap", "readahead"])
 def test_default_cache_type(s3, default_cache_type):
     data = b"a" * (10 * 2**20)
     s3 = S3FileSystem(
@@ -1024,7 +1024,7 @@ async def head_object(*args, **kwargs):

 def test_read_small(s3):
     fn = test_bucket_name + "/2014-01-01.csv"
-    with s3.open(fn, "rb", block_size=10) as f:
+    with s3.open(fn, "rb", block_size=10, cache_type="bytes") as f:
         out = []
         while True:
             data = f.read(3)
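The test change illustrates the per-call override: a cache_type passed to open() takes precedence over the filesystem-level default_cache_type. A hedged sketch mirroring the updated test, reusing fs from the sketch above with a hypothetical bucket/key:

# Pin one file handle to the "bytes" cache regardless of the new default,
# as the updated test_read_small does for tiny sequential reads.
with fs.open("some-bucket/2014-01-01.csv", "rb", block_size=10, cache_type="bytes") as f:
    out = []
    while True:
        data = f.read(3)
        if not data:
            break
        out.append(data)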
