
Commit 061f47e

remove breaking test
1 parent 2253c09 commit 061f47e

1 file changed: +0, -114 lines

src/storage/mod.rs

Lines changed: 0 additions & 114 deletions
@@ -1423,120 +1423,6 @@ mod test {
 
         Ok(())
     }
-
-    #[tokio::test(flavor = "multi_thread")]
-    async fn test_recompress_index_files_in_bucket() -> Result<()> {
-        use std::io::Cursor;
-        use tokio::io;
-
-        let env = TestEnvironment::with_config(
-            TestEnvironment::base_config()
-                .storage_backend(StorageKind::S3)
-                .build()?,
-        )
-        .await?;
-
-        const CONTENT: &[u8] = b"Hello, world! Hello, world! Hello, world! Hello, world!";
-        let alg = Some(CompressionAlgorithm::Zstd);
-
-        use async_compression::tokio::write;
-
-        let broken_archive = {
-            // broken compression implementation, `.shutdown` missing.
-            let mut buf = Vec::new();
-            let mut enc = write::ZstdEncoder::new(&mut buf);
-            io::copy(&mut Cursor::new(CONTENT), &mut enc).await?;
-            // check if it's really broken, EOF missing
-            assert_ne!(buf.last_chunk::<3>().unwrap(), &ZSTD_EOF_BYTES);
-            buf
-        };
-
-        const KRATE: &str = "test_crate";
-        env.fake_release()
-            .await
-            .name(KRATE)
-            .version(V0_1)
-            .archive_storage(true)
-            .keywords(vec!["kw 1".into(), "kw 2".into()])
-            .create()
-            .await?;
-
-        let storage = env.async_storage();
-        // delete everything in storage created by the fake_release above
-        for p in &["rustdoc/", "sources/"] {
-            storage.delete_prefix(p).await?;
-        }
-
-        // use raw inner storage backend so we can fetch the compressed file without automatic
-        // decompression
-        let StorageBackend::S3(raw_storage) = &storage.backend else {
-            panic!("S3 backend set above");
-        };
-
-        let index_path = format!("{}.index", rustdoc_archive_path(KRATE, &V0_1));
-
-        // upload as-is to the storage, into the place of an archive index.
-        // `.store_inner` doesn't compress
-        storage
-            .store_inner(vec![Blob {
-                path: index_path.clone(),
-                mime: mime::APPLICATION_OCTET_STREAM,
-                date_updated: Utc::now(),
-                content: broken_archive.clone(),
-                compression: alg,
-            }])
-            .await?;
-
-        // validate how the old compressed blob looks like, even though we just uploaded it
-        let old_compressed_blob = raw_storage
-            .get_stream(&index_path, None)
-            .await?
-            .materialize(usize::MAX)
-            .await?;
-        assert_eq!(old_compressed_blob.compression, alg);
-
-        // try getting the decompressed broken blob via normal storage API.
-        // old async-compression can do this without choking.
-        assert_eq!(
-            CONTENT,
-            &storage.get(&index_path, usize::MAX).await?.content
-        );
-
-        // run the recompression logic
-        let mut conn = env.async_db().async_conn().await;
-        let (checked, recompressed) = storage
-            .recompress_index_files_in_bucket(&mut conn, None, None, None)
-            .await?;
-        assert_eq!(checked, 1);
-        assert_eq!(recompressed, 1);
-
-        let new_compressed_blob = raw_storage
-            .get_stream(&index_path, None)
-            .await?
-            .materialize(usize::MAX)
-            .await?;
-        assert_eq!(new_compressed_blob.compression, alg);
-
-        // after fixing, getting the decompressed blob via normal storage API still works
-        assert_eq!(
-            CONTENT,
-            &storage.get(&index_path, usize::MAX).await?.content
-        );
-
-        // after recompression the content length should be different, 3 bytes more for
-        // the zstd EOF
-        assert_eq!(
-            new_compressed_blob.content.len(),
-            old_compressed_blob.content.len() + ZSTD_EOF_BYTES.len()
-        );
-
-        assert_eq!(
-            [&old_compressed_blob.content[..], &ZSTD_EOF_BYTES].concat(),
-            new_compressed_blob.content
-        );
-
-        Ok(())
-    }
 }
 
 /// Backend tests are a set of tests executed on all the supported storage backends. They ensure
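The removed test exercised recompress_index_files_in_bucket by uploading an archive index whose zstd stream was deliberately left unfinished: the encoder's `.shutdown()` was never awaited, so the trailing 3-byte zstd EOF marker was missing. The following is a standalone sketch (not code from this repository) that reproduces just that encoder behaviour with async-compression; the crate usage, the test content, and the 3-byte difference come from the test above, while the `main` wrapper and the variable names `broken` and `complete` are assumptions for illustration.

use std::io::Cursor;

use async_compression::tokio::write::ZstdEncoder;
use tokio::io::{self, AsyncWriteExt};

#[tokio::main]
async fn main() -> io::Result<()> {
    const CONTENT: &[u8] = b"Hello, world! Hello, world! Hello, world! Hello, world!";

    // Broken variant, mirroring the removed test: `.shutdown()` is never
    // awaited, so the stream-closing zstd EOF marker is never written.
    let broken = {
        let mut buf = Vec::new();
        let mut enc = ZstdEncoder::new(&mut buf);
        io::copy(&mut Cursor::new(CONTENT), &mut enc).await?;
        buf
    };

    // Correct variant: `.shutdown()` flushes the encoder and finishes the
    // zstd stream.
    let complete = {
        let mut buf = Vec::new();
        let mut enc = ZstdEncoder::new(&mut buf);
        io::copy(&mut Cursor::new(CONTENT), &mut enc).await?;
        enc.shutdown().await?;
        buf
    };

    // The removed test asserted the same relationship after recompression:
    // the finished stream is the broken one plus the 3-byte zstd EOF marker.
    assert!(complete.starts_with(&broken));
    assert_eq!(complete.len(), broken.len() + 3);

    Ok(())
}

Building this sketch would need roughly: async-compression with its tokio and zstd features, and tokio with the macros, rt-multi-thread, and io-util features.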
