Skip to content

Commit

Permalink
Disable cache fragmentation except for progressive
Browse files Browse the repository at this point in the history
DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION is added to indicate to the
cache when fragmentation is allowed. This flag is set for progressive
requests only.

To avoid breaking changes, CacheDataSink defaults to ignoring the flag
(and enabling fragmentation) for now. Respecting the flag can be
enabled manually. DownloaderConstructorHelper enables respecting of
the flag.

Issue: #4253
PiperOrigin-RevId: 229176835
  • Loading branch information
ojw28 committed Jan 14, 2019
1 parent 86637fa commit 1b62277
Show file tree
Hide file tree
Showing 11 changed files with 127 additions and 66 deletions.
2 changes: 2 additions & 0 deletions RELEASENOTES.md
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,8 @@
* Fix issue with reusing a `ClippingMediaSource` with an inner
`ExtractorMediaSource` and a non-zero start position
([#5351](https://github.com/google/ExoPlayer/issues/5351)).
* Downloading/Caching: Improve cache performance
([#4253](https://github.com/google/ExoPlayer/issues/4253)).

### 2.9.3 ###

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -109,16 +109,18 @@ public DownloaderConstructorHelper(
cacheReadDataSourceFactory != null
? cacheReadDataSourceFactory
: new FileDataSourceFactory();
DataSink.Factory writeDataSinkFactory =
cacheWriteDataSinkFactory != null
? cacheWriteDataSinkFactory
: new CacheDataSinkFactory(cache, CacheDataSink.DEFAULT_MAX_CACHE_FILE_SIZE);
if (cacheWriteDataSinkFactory == null) {
CacheDataSinkFactory factory =
new CacheDataSinkFactory(cache, CacheDataSink.DEFAULT_FRAGMENT_SIZE);
factory.experimental_setRespectCacheFragmentationFlag(true);
cacheWriteDataSinkFactory = factory;
}
onlineCacheDataSourceFactory =
new CacheDataSourceFactory(
cache,
upstreamFactory,
readDataSourceFactory,
writeDataSinkFactory,
cacheWriteDataSinkFactory,
CacheDataSource.FLAG_BLOCK_ON_CACHE,
/* eventListener= */ null,
cacheKeyFactory);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,11 @@ public ProgressiveDownloader(
Uri uri, @Nullable String customCacheKey, DownloaderConstructorHelper constructorHelper) {
this.dataSpec =
new DataSpec(
uri, /* absoluteStreamPosition= */ 0, C.LENGTH_UNSET, customCacheKey, /* flags= */ 0);
uri,
/* absoluteStreamPosition= */ 0,
C.LENGTH_UNSET,
customCacheKey,
/* flags= */ DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION);
this.cache = constructorHelper.getCache();
this.dataSource = constructorHelper.createCacheDataSource();
this.cacheKeyFactory = constructorHelper.getCacheKeyFactory();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -988,7 +988,9 @@ private DataSpec buildDataSpec(long position) {
position,
C.LENGTH_UNSET,
customCacheKey,
DataSpec.FLAG_ALLOW_ICY_METADATA | DataSpec.FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN);
DataSpec.FLAG_ALLOW_ICY_METADATA
| DataSpec.FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN
| DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION);
}

private void setLoadPosition(long position, long timeUs) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,19 @@ public final class DataSpec {

/**
* The flags that apply to any request for data. Possible flag values are {@link
* #FLAG_ALLOW_GZIP}, {@link #FLAG_ALLOW_ICY_METADATA} and {@link
* #FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN}.
* #FLAG_ALLOW_GZIP}, {@link #FLAG_ALLOW_ICY_METADATA}, {@link #FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN}
* and {@link #FLAG_ALLOW_CACHE_FRAGMENTATION}.
*/
@Documented
@Retention(RetentionPolicy.SOURCE)
@IntDef(
flag = true,
value = {FLAG_ALLOW_GZIP, FLAG_ALLOW_ICY_METADATA, FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN})
value = {
FLAG_ALLOW_GZIP,
FLAG_ALLOW_ICY_METADATA,
FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN,
FLAG_ALLOW_CACHE_FRAGMENTATION
})
public @interface Flags {}
/**
* Allows an underlying network stack to request that the server use gzip compression.
Expand All @@ -53,12 +58,17 @@ public final class DataSpec {
* DataSource#read(byte[], int, int)} will be the decompressed data.
*/
public static final int FLAG_ALLOW_GZIP = 1;

/** Allows an underlying network stack to request that the stream contain ICY metadata. */
public static final int FLAG_ALLOW_ICY_METADATA = 1 << 1; // 2

/** Prevents caching if the length cannot be resolved when the {@link DataSource} is opened. */
public static final int FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN = 1 << 2; // 4
/**
* Allows fragmentation of this request into multiple cache files, meaning a cache eviction policy
* will be able to evict individual fragments of the data. Depending on the cache implementation,
* setting this flag may also enable more concurrent access to the data (e.g. reading one fragment
* whilst writing another).
*/
  public static final int FLAG_ALLOW_CACHE_FRAGMENTATION = 1 << 4; // 16

/**
* The set of HTTP methods that are supported by ExoPlayer {@link HttpDataSource}s. One of {@link
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,20 +37,22 @@
*/
public final class CacheDataSink implements DataSink {

/** Default {@code maxCacheFileSize} recommended for caching use cases. */
public static final long DEFAULT_MAX_CACHE_FILE_SIZE = 5 * 1024 * 1024;
/** Default {@code fragmentSize} recommended for caching use cases. */
public static final long DEFAULT_FRAGMENT_SIZE = 5 * 1024 * 1024;
/** Default buffer size in bytes. */
public static final int DEFAULT_BUFFER_SIZE = 20 * 1024;

private static final long MIN_RECOMMENDED_MAX_CACHE_FILE_SIZE = 2 * 1024 * 1024;
private static final long MIN_RECOMMENDED_FRAGMENT_SIZE = 2 * 1024 * 1024;
private static final String TAG = "CacheDataSink";

private final Cache cache;
private final long maxCacheFileSize;
private final long fragmentSize;
private final int bufferSize;

private boolean syncFileDescriptor;
private boolean respectCacheFragmentationFlag;
private DataSpec dataSpec;
private long dataSpecFragmentSize;
private File file;
private OutputStream outputStream;
private FileOutputStream underlyingFileOutputStream;
Expand All @@ -73,58 +75,68 @@ public CacheDataSinkException(IOException cause) {
* Constructs an instance using {@link #DEFAULT_BUFFER_SIZE}.
*
* @param cache The cache into which data should be written.
* @param maxCacheFileSize The maximum size of a cache file, in bytes. If a request results in
* data being written whose size exceeds this value, then the data will be fragmented into
* multiple cache files. If set to {@link C#LENGTH_UNSET} then no fragmentation will occur.
* Using a small value allows for finer-grained cache eviction policies, at the cost of
* increased overhead both on the cache implementation and the file system. Values under
* {@code (2 * 1024 * 1024)} are not recommended.
* @param fragmentSize For requests that should be fragmented into multiple cache files, this is
* the maximum size of a cache file in bytes. If set to {@link C#LENGTH_UNSET} then no
* fragmentation will occur. Using a small value allows for finer-grained cache eviction
* policies, at the cost of increased overhead both on the cache implementation and the file
* system. Values under {@code (2 * 1024 * 1024)} are not recommended.
*/
public CacheDataSink(Cache cache, long maxCacheFileSize) {
this(cache, maxCacheFileSize, DEFAULT_BUFFER_SIZE);
public CacheDataSink(Cache cache, long fragmentSize) {
this(cache, fragmentSize, DEFAULT_BUFFER_SIZE);
}

/**
* @param cache The cache into which data should be written.
* @param maxCacheFileSize The maximum size of a cache file, in bytes. If a request results in
* data being written whose size exceeds this value, then the data will be fragmented into
* multiple cache files. If set to {@link C#LENGTH_UNSET} then no fragmentation will occur.
* Using a small value allows for finer-grained cache eviction policies, at the cost of
* increased overhead both on the cache implementation and the file system. Values under
* {@code (2 * 1024 * 1024)} are not recommended.
* @param fragmentSize For requests that should be fragmented into multiple cache files, this is
* the maximum size of a cache file in bytes. If set to {@link C#LENGTH_UNSET} then no
* fragmentation will occur. Using a small value allows for finer-grained cache eviction
* policies, at the cost of increased overhead both on the cache implementation and the file
* system. Values under {@code (2 * 1024 * 1024)} are not recommended.
* @param bufferSize The buffer size in bytes for writing to a cache file. A zero or negative
* value disables buffering.
*/
public CacheDataSink(Cache cache, long maxCacheFileSize, int bufferSize) {
public CacheDataSink(Cache cache, long fragmentSize, int bufferSize) {
Assertions.checkState(
maxCacheFileSize > 0 || maxCacheFileSize == C.LENGTH_UNSET,
"maxCacheFileSize must be positive or C.LENGTH_UNSET.");
if (maxCacheFileSize != C.LENGTH_UNSET
&& maxCacheFileSize < MIN_RECOMMENDED_MAX_CACHE_FILE_SIZE) {
fragmentSize > 0 || fragmentSize == C.LENGTH_UNSET,
"fragmentSize must be positive or C.LENGTH_UNSET.");
if (fragmentSize != C.LENGTH_UNSET && fragmentSize < MIN_RECOMMENDED_FRAGMENT_SIZE) {
Log.w(
TAG,
"maxCacheFileSize is below the minimum recommended value of "
+ MIN_RECOMMENDED_MAX_CACHE_FILE_SIZE
"fragmentSize is below the minimum recommended value of "
+ MIN_RECOMMENDED_FRAGMENT_SIZE
+ ". This may cause poor cache performance.");
}
this.cache = Assertions.checkNotNull(cache);
this.maxCacheFileSize = maxCacheFileSize == C.LENGTH_UNSET ? Long.MAX_VALUE : maxCacheFileSize;
this.fragmentSize = fragmentSize == C.LENGTH_UNSET ? Long.MAX_VALUE : fragmentSize;
this.bufferSize = bufferSize;
syncFileDescriptor = true;
}

/**
* Sets whether file descriptors are synced when closing output streams.
*
* <p>This method is experimental, and will be renamed or removed in a future release. It should
* only be called before the renderer is used.
* <p>This method is experimental, and will be renamed or removed in a future release.
*
* @param syncFileDescriptor Whether file descriptors are synced when closing output streams.
*/
public void experimental_setSyncFileDescriptor(boolean syncFileDescriptor) {
this.syncFileDescriptor = syncFileDescriptor;
}

/**
* Sets whether this instance respects the {@link DataSpec#FLAG_ALLOW_CACHE_FRAGMENTATION} flag.
* If set to {@code false} requests will always be fragmented. If set to {@code true} requests
* will be fragmented only if the flag is set.
*
* <p>This method is experimental, and will be renamed or removed in a future release.
*
* @param respectCacheFragmentationFlag Whether to respect the {@link
* DataSpec#FLAG_ALLOW_CACHE_FRAGMENTATION} flag.
*/
public void experimental_setRespectCacheFragmentationFlag(boolean respectCacheFragmentationFlag) {
this.respectCacheFragmentationFlag = respectCacheFragmentationFlag;
}

@Override
public void open(DataSpec dataSpec) throws CacheDataSinkException {
if (dataSpec.length == C.LENGTH_UNSET
Expand All @@ -133,6 +145,11 @@ public void open(DataSpec dataSpec) throws CacheDataSinkException {
return;
}
this.dataSpec = dataSpec;
this.dataSpecFragmentSize =
!respectCacheFragmentationFlag
|| dataSpec.isFlagSet(DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION)
? fragmentSize
: Long.MAX_VALUE;
dataSpecBytesWritten = 0;
try {
openNextOutputStream();
Expand All @@ -149,12 +166,12 @@ public void write(byte[] buffer, int offset, int length) throws CacheDataSinkExc
try {
int bytesWritten = 0;
while (bytesWritten < length) {
if (outputStreamBytesWritten == maxCacheFileSize) {
if (outputStreamBytesWritten == dataSpecFragmentSize) {
closeCurrentOutputStream();
openNextOutputStream();
}
int bytesToWrite = (int) Math.min(length - bytesWritten,
maxCacheFileSize - outputStreamBytesWritten);
int bytesToWrite =
(int) Math.min(length - bytesWritten, dataSpecFragmentSize - outputStreamBytesWritten);
outputStream.write(buffer, offset + bytesWritten, bytesToWrite);
bytesWritten += bytesToWrite;
outputStreamBytesWritten += bytesToWrite;
Expand All @@ -181,7 +198,7 @@ private void openNextOutputStream() throws IOException {
long length =
dataSpec.length == C.LENGTH_UNSET
? C.LENGTH_UNSET
: Math.min(dataSpec.length - dataSpecBytesWritten, maxCacheFileSize);
: Math.min(dataSpec.length - dataSpecBytesWritten, dataSpecFragmentSize);
file =
cache.startFile(
dataSpec.key, dataSpec.absoluteStreamPosition + dataSpecBytesWritten, length);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,27 +23,50 @@
public final class CacheDataSinkFactory implements DataSink.Factory {

private final Cache cache;
private final long maxCacheFileSize;
private final long fragmentSize;
private final int bufferSize;

private boolean syncFileDescriptor;
private boolean respectCacheFragmentationFlag;

/** @see CacheDataSink#CacheDataSink(Cache, long) */
public CacheDataSinkFactory(Cache cache, long fragmentSize) {
this(cache, fragmentSize, CacheDataSink.DEFAULT_BUFFER_SIZE);
}

/** @see CacheDataSink#CacheDataSink(Cache, long, int) */
public CacheDataSinkFactory(Cache cache, long fragmentSize, int bufferSize) {
this.cache = cache;
this.fragmentSize = fragmentSize;
this.bufferSize = bufferSize;
}

/**
* @see CacheDataSink#CacheDataSink(Cache, long)
* See {@link CacheDataSink#experimental_setSyncFileDescriptor(boolean)}.
*
* <p>This method is experimental, and will be renamed or removed in a future release.
*/
public CacheDataSinkFactory(Cache cache, long maxCacheFileSize) {
this(cache, maxCacheFileSize, CacheDataSink.DEFAULT_BUFFER_SIZE);
public CacheDataSinkFactory experimental_setSyncFileDescriptor(boolean syncFileDescriptor) {
this.syncFileDescriptor = syncFileDescriptor;
return this;
}

/**
* @see CacheDataSink#CacheDataSink(Cache, long, int)
* See {@link CacheDataSink#experimental_setRespectCacheFragmentationFlag(boolean)}.
*
* <p>This method is experimental, and will be renamed or removed in a future release.
*/
public CacheDataSinkFactory(Cache cache, long maxCacheFileSize, int bufferSize) {
this.cache = cache;
this.maxCacheFileSize = maxCacheFileSize;
this.bufferSize = bufferSize;
public CacheDataSinkFactory experimental_setRespectCacheFragmentationFlag(
boolean respectCacheFragmentationFlag) {
this.respectCacheFragmentationFlag = respectCacheFragmentationFlag;
return this;
}

@Override
public DataSink createDataSink() {
return new CacheDataSink(cache, maxCacheFileSize, bufferSize);
CacheDataSink dataSink = new CacheDataSink(cache, fragmentSize, bufferSize);
dataSink.experimental_setSyncFileDescriptor(syncFileDescriptor);
dataSink.experimental_setRespectCacheFragmentationFlag(respectCacheFragmentationFlag);
return dataSink;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ public CacheDataSource(Cache cache, DataSource upstream, @Flags int flags) {
cache,
upstream,
new FileDataSource(),
new CacheDataSink(cache, CacheDataSink.DEFAULT_MAX_CACHE_FILE_SIZE),
new CacheDataSink(cache, CacheDataSink.DEFAULT_FRAGMENT_SIZE),
flags,
/* eventListener= */ null);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ public CacheDataSourceFactory(
cache,
upstreamFactory,
new FileDataSourceFactory(),
new CacheDataSinkFactory(cache, CacheDataSink.DEFAULT_MAX_CACHE_FILE_SIZE),
new CacheDataSinkFactory(cache, CacheDataSink.DEFAULT_FRAGMENT_SIZE),
flags,
/* eventListener= */ null);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@
public final class CacheDataSourceTest {

private static final byte[] TEST_DATA = new byte[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
private static final int MAX_CACHE_FILE_SIZE = 3;
private static final int CACHE_FRAGMENT_SIZE = 3;
private static final String DATASPEC_KEY = "dataSpecKey";

private Uri testDataUri;
Expand Down Expand Up @@ -81,13 +81,13 @@ public void tearDown() throws Exception {
}

@Test
public void testMaxCacheFileSize() throws Exception {
public void testFragmentSize() throws Exception {
CacheDataSource cacheDataSource = createCacheDataSource(false, false);
assertReadDataContentLength(cacheDataSource, boundedDataSpec, false, false);
for (String key : cache.getKeys()) {
for (CacheSpan cacheSpan : cache.getCachedSpans(key)) {
assertThat(cacheSpan.length <= MAX_CACHE_FILE_SIZE).isTrue();
assertThat(cacheSpan.file.length() <= MAX_CACHE_FILE_SIZE).isTrue();
assertThat(cacheSpan.length <= CACHE_FRAGMENT_SIZE).isTrue();
assertThat(cacheSpan.file.length() <= CACHE_FRAGMENT_SIZE).isTrue();
}
}
}
Expand Down Expand Up @@ -548,14 +548,14 @@ private CacheDataSource createCacheDataSource(
setReadException,
unknownLength,
CacheDataSource.FLAG_BLOCK_ON_CACHE,
new CacheDataSink(cache, MAX_CACHE_FILE_SIZE),
new CacheDataSink(cache, CACHE_FRAGMENT_SIZE),
cacheKeyFactory);
}

private CacheDataSource createCacheDataSource(
boolean setReadException, boolean unknownLength, @CacheDataSource.Flags int flags) {
return createCacheDataSource(
setReadException, unknownLength, flags, new CacheDataSink(cache, MAX_CACHE_FILE_SIZE));
setReadException, unknownLength, flags, new CacheDataSink(cache, CACHE_FRAGMENT_SIZE));
}

private CacheDataSource createCacheDataSource(
Expand Down Expand Up @@ -602,6 +602,7 @@ private DataSpec buildDataSpec(long position, long length) {
}

private DataSpec buildDataSpec(long position, long length, @Nullable String key) {
return new DataSpec(testDataUri, position, length, key);
return new DataSpec(
testDataUri, position, length, key, DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION);
}
}
Loading

0 comments on commit 1b62277

Please sign in to comment.