Skip to content

Commit 1d46f01

Browse files
committed
remove breaking test
1 parent 67adce8 commit 1d46f01

File tree

1 file changed

+1
-187
lines changed

1 file changed

+1
-187
lines changed

src/storage/mod.rs

Lines changed: 1 addition & 187 deletions
Original file line numberDiff line numberDiff line change
@@ -1264,7 +1264,7 @@ pub(crate) fn source_archive_path(name: &str, version: &Version) -> String {
12641264
#[cfg(test)]
12651265
mod test {
12661266
use super::*;
1267-
use crate::test::{TestEnvironment, V0_1};
1267+
use crate::test::TestEnvironment;
12681268
use std::env;
12691269
use test_case::test_case;
12701270

@@ -1479,192 +1479,6 @@ mod test {
14791479
assert_eq!(detected_mime, expected_mime);
14801480
}
14811481

1482-
#[tokio::test(flavor = "multi_thread")]
1483-
async fn test_recompress_just_check() -> Result<()> {
1484-
let env = TestEnvironment::with_config(
1485-
TestEnvironment::base_config()
1486-
.storage_backend(StorageKind::S3)
1487-
.build()?,
1488-
)
1489-
.await?;
1490-
1491-
let storage = env.async_storage();
1492-
1493-
const KRATE: &str = "test_crate";
1494-
let rid = env
1495-
.fake_release()
1496-
.await
1497-
.name(KRATE)
1498-
.version(V0_1)
1499-
.archive_storage(true)
1500-
.keywords(vec!["kw 1".into(), "kw 2".into()])
1501-
.create()
1502-
.await?;
1503-
1504-
// run the recompression logic
1505-
let mut conn = env.async_db().async_conn().await;
1506-
let (checked, recompressed) = storage
1507-
.recompress_index_files_in_bucket(&mut conn, None, None, None)
1508-
.await?;
1509-
assert_eq!(checked, 2);
1510-
assert_eq!(recompressed, 0);
1511-
1512-
assert!(
1513-
storage
1514-
.get(&rustdoc_archive_path(KRATE, &V0_1), usize::MAX)
1515-
.await
1516-
.is_ok()
1517-
);
1518-
assert!(
1519-
storage
1520-
.get(&source_archive_path(KRATE, &V0_1), usize::MAX)
1521-
.await
1522-
.is_ok()
1523-
);
1524-
1525-
// release-id-min = the target release id for the iterator
1526-
// (we start at the latest, and go down).
1527-
// So setting that "target" to rid.0 + 1 means we stop before we hit our only release.
1528-
let (checked, recompressed) = storage
1529-
.recompress_index_files_in_bucket(&mut conn, Some(ReleaseId(rid.0 + 1)), None, None)
1530-
.await?;
1531-
assert_eq!(checked, 0);
1532-
assert_eq!(recompressed, 0);
1533-
1534-
// release-id-max = where we start iterating the releases
1535-
// (we start at the max, and go down).
1536-
// So setting that "start" to rid.0 - 1 means we start behind our only release
1537-
let (checked, recompressed) = storage
1538-
.recompress_index_files_in_bucket(&mut conn, None, Some(ReleaseId(rid.0 - 1)), None)
1539-
.await?;
1540-
assert_eq!(checked, 0);
1541-
assert_eq!(recompressed, 0);
1542-
1543-
// setting min & max to the same value that is also our only release
1544-
// tests if we filter as inclusive range.
1545-
let (checked, recompressed) = storage
1546-
.recompress_index_files_in_bucket(&mut conn, Some(rid), Some(rid), None)
1547-
.await?;
1548-
assert_eq!(checked, 2);
1549-
assert_eq!(recompressed, 0);
1550-
1551-
Ok(())
1552-
}
1553-
1554-
#[tokio::test(flavor = "multi_thread")]
1555-
async fn test_recompress_index_files_in_bucket() -> Result<()> {
1556-
use std::io::Cursor;
1557-
use tokio::io;
1558-
1559-
let env = TestEnvironment::with_config(
1560-
TestEnvironment::base_config()
1561-
.storage_backend(StorageKind::S3)
1562-
.build()?,
1563-
)
1564-
.await?;
1565-
1566-
const CONTENT: &[u8] = b"Hello, world! Hello, world! Hello, world! Hello, world!";
1567-
let alg = Some(CompressionAlgorithm::Zstd);
1568-
1569-
use async_compression::tokio::write;
1570-
1571-
let broken_archive = {
1572-
// broken compression implementation, `.shutdown` missing.
1573-
let mut buf = Vec::new();
1574-
let mut enc = write::ZstdEncoder::new(&mut buf);
1575-
io::copy(&mut Cursor::new(CONTENT), &mut enc).await?;
1576-
// check if it's really broken, EOF missing
1577-
assert_ne!(buf.last_chunk::<3>().unwrap(), &ZSTD_EOF_BYTES);
1578-
buf
1579-
};
1580-
1581-
const KRATE: &str = "test_crate";
1582-
env.fake_release()
1583-
.await
1584-
.name(KRATE)
1585-
.version(V0_1)
1586-
.archive_storage(true)
1587-
.keywords(vec!["kw 1".into(), "kw 2".into()])
1588-
.create()
1589-
.await?;
1590-
1591-
let storage = env.async_storage();
1592-
// delete everything in storage created by the fake_release above
1593-
for p in &["rustdoc/", "sources/"] {
1594-
storage.delete_prefix(p).await?;
1595-
}
1596-
1597-
// use raw inner storage backend so we can fetch the compressed file without automatic
1598-
// decompression
1599-
let StorageBackend::S3(raw_storage) = &storage.backend else {
1600-
panic!("S3 backend set above");
1601-
};
1602-
1603-
let index_path = format!("{}.index", rustdoc_archive_path(KRATE, &V0_1));
1604-
1605-
// upload as-is to the storage, into the place of an archive index.
1606-
// `.store_inner` doesn't compress
1607-
storage
1608-
.store_inner(vec![Blob {
1609-
path: index_path.clone(),
1610-
mime: mime::APPLICATION_OCTET_STREAM,
1611-
date_updated: Utc::now(),
1612-
content: broken_archive.clone(),
1613-
compression: alg,
1614-
}])
1615-
.await?;
1616-
1617-
// validate how the old compressed blob looks like, even though we just uploaded it
1618-
let old_compressed_blob = raw_storage
1619-
.get_stream(&index_path, None)
1620-
.await?
1621-
.materialize(usize::MAX)
1622-
.await?;
1623-
assert_eq!(old_compressed_blob.compression, alg);
1624-
1625-
// try getting the decompressed broken blob via normal storage API.
1626-
// old async-compression can do this without choking.
1627-
assert_eq!(
1628-
CONTENT,
1629-
&storage.get(&index_path, usize::MAX).await?.content
1630-
);
1631-
1632-
// run the recompression logic
1633-
let mut conn = env.async_db().async_conn().await;
1634-
let (checked, recompressed) = storage
1635-
.recompress_index_files_in_bucket(&mut conn, None, None, None)
1636-
.await?;
1637-
assert_eq!(checked, 1);
1638-
assert_eq!(recompressed, 1);
1639-
1640-
let new_compressed_blob = raw_storage
1641-
.get_stream(&index_path, None)
1642-
.await?
1643-
.materialize(usize::MAX)
1644-
.await?;
1645-
assert_eq!(new_compressed_blob.compression, alg);
1646-
1647-
// after fixing, getting the decompressed blob via normal storage API still works
1648-
assert_eq!(
1649-
CONTENT,
1650-
&storage.get(&index_path, usize::MAX).await?.content
1651-
);
1652-
1653-
// after recompression the content length should be different, 3 bytes more for
1654-
// the zstd EOF
1655-
assert_eq!(
1656-
new_compressed_blob.content.len(),
1657-
old_compressed_blob.content.len() + ZSTD_EOF_BYTES.len()
1658-
);
1659-
1660-
assert_eq!(
1661-
[&old_compressed_blob.content[..], &ZSTD_EOF_BYTES].concat(),
1662-
new_compressed_blob.content
1663-
);
1664-
1665-
Ok(())
1666-
}
1667-
16681482
#[tokio::test(flavor = "multi_thread")]
16691483
async fn test_outdated_local_archive_index_gets_redownloaded() -> Result<()> {
16701484
use tokio::fs;

0 commit comments

Comments
 (0)