diff --git a/Cargo.lock b/Cargo.lock index 64b1cc5cd..88c2c1d8a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -344,7 +344,7 @@ dependencies = [ "bisection", "futures", "http-content-range", - "itertools 0.12.1", + "itertools", "memmap2 0.9.4", "reqwest", "reqwest-middleware", @@ -1608,7 +1608,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -1799,15 +1799,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "itertools" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.12.1" @@ -2435,7 +2426,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b645dcde5f119c2c454a92d0dfa271a2a3b205da92e4292a68ead4bdbfde1f33" dependencies = [ "heck", - "itertools 0.12.1", + "itertools", "proc-macro2", "proc-macro2-diagnostics", "quote", @@ -2701,11 +2692,12 @@ dependencies = [ "flate2", "futures", "human_bytes", + "humantime", "indexmap 2.2.2", "indicatif", "insta", "is_executable", - "itertools 0.12.1", + "itertools", "lazy_static", "libc", "miette 7.0.0", @@ -2938,7 +2930,7 @@ dependencies = [ [[package]] name = "rattler" version = "0.17.0" -source = "git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "anyhow", "async-compression", @@ -2947,11 +2939,12 @@ dependencies = [ "digest", "dirs", "drop_bomb", + "fs-err", "futures", "fxhash", "hex", "indexmap 2.2.2", - "itertools 0.12.1", + "itertools", "memchr", "memmap2 0.9.4", "nom", @@ -2982,14 +2975,14 @@ dependencies = [ [[package]] name = "rattler_conda_types" version = "0.17.0" -source = "git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "chrono", "fxhash", "glob", "hex", "indexmap 2.2.2", - "itertools 0.12.1", + "itertools", "lazy-regex", "nom", "purl", @@ -3011,7 +3004,7 @@ dependencies = [ [[package]] name = "rattler_digest" version = "0.17.0" -source = "git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "blake2", "digest", @@ -3052,7 +3045,7 @@ dependencies = [ "http-cache-semantics", "include_dir", "indexmap 2.2.2", - "itertools 0.12.1", + "itertools", "miette 7.0.0", "mime", "once_cell", @@ -3067,7 +3060,7 @@ dependencies = [ "regex", "reqwest", "reqwest-middleware", - "resolvo 0.4.0", + "resolvo", "serde", "serde_json", "serde_with", @@ -3086,12 +3079,12 @@ dependencies = [ [[package]] name = "rattler_lock" version = "0.17.0" -source = "git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "chrono", "fxhash", "indexmap 2.2.2", - "itertools 0.12.1", + "itertools", "pep440_rs", "pep508_rs", "purl", @@ -3109,7 +3102,7 @@ dependencies = [ [[package]] name = "rattler_macros" version = "0.17.0" -source = 
"git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "quote", "syn 2.0.48", @@ -3118,7 +3111,7 @@ dependencies = [ [[package]] name = "rattler_networking" version = "0.17.0" -source = "git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "anyhow", "async-trait", @@ -3126,7 +3119,7 @@ dependencies = [ "dirs", "fslock", "getrandom", - "itertools 0.12.1", + "itertools", "keyring", "lazy_static", "libc", @@ -3146,12 +3139,12 @@ dependencies = [ [[package]] name = "rattler_package_streaming" version = "0.17.0" -source = "git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "bzip2", "chrono", "futures-util", - "itertools 0.12.1", + "itertools", "num_cpus", "rattler_conda_types", "rattler_digest", @@ -3172,7 +3165,7 @@ dependencies = [ [[package]] name = "rattler_repodata_gateway" version = "0.17.0" -source = "git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "anyhow", "async-compression", @@ -3183,7 +3176,7 @@ dependencies = [ "hex", "humansize", "humantime", - "itertools 0.12.1", + "itertools", "json-patch", "libc", "md-5", @@ -3211,11 +3204,11 @@ dependencies = [ [[package]] name = "rattler_shell" version = "0.17.0" -source = "git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "enum_dispatch", "indexmap 2.2.2", - "itertools 0.12.1", + "itertools", "rattler_conda_types", "serde_json", "shlex", @@ -3228,15 +3221,16 @@ dependencies = [ [[package]] name = "rattler_solve" version = "0.17.0" -source = "git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "anyhow", "chrono", + "futures", "hex", - "itertools 0.12.1", + "itertools", "rattler_conda_types", "rattler_digest", - "resolvo 0.3.0", + "resolvo", "serde", "tempfile", "thiserror", @@ -3247,7 +3241,7 @@ dependencies = [ [[package]] name = "rattler_virtual_packages" version = "0.17.0" -source = "git+https://github.com/mamba-org/rattler?branch=main#aaf2648579d245dcbaab20a2b0f061d7bcaf016e" +source = "git+https://github.com/mamba-org/rattler?branch=main#4fc2d38ee482aeb154d75cd3423577d3bcc6b3f9" dependencies = [ "cfg-if", "libloading", @@ -3426,19 +3420,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "resolvo" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd163bc7df01195423c83a7a391fecf319ff41d3de899694a9ccb698e790b29" -dependencies = [ - "bitvec", - "elsa", - "itertools 0.11.0", - "petgraph", - "tracing", -] - [[package]] name = "resolvo" version = "0.4.0" @@ -3449,7 +3430,7 @@ dependencies = [ "elsa", "event-listener 5.0.0", "futures", - "itertools 0.12.1", + "itertools", "petgraph", "tokio", "tracing", diff 
--git a/Cargo.toml b/Cargo.toml index 76975952b..017f04c0f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,6 +32,7 @@ dunce = "1.0.4" flate2 = "1.0.28" futures = "0.3.30" human_bytes = "0.4.3" +humantime = "2.1.0" indexmap = { version = "2.2.2", features = ["serde"] } indicatif = "0.17.7" insta = { version = "1.34.0", features = ["yaml"] } diff --git a/examples/polarify/pixi.toml b/examples/polarify/pixi.toml index 07ac346fa..3e0ef9fe6 100644 --- a/examples/polarify/pixi.toml +++ b/examples/polarify/pixi.toml @@ -1,7 +1,7 @@ [project] name = "polarify-use-case" channels = ["conda-forge"] -platforms = ["linux-64", "osx-arm64", "osx-64", "win-64"] +platforms = ["linux-64", "osx-arm64", "osx-64"] [tasks] postinstall = "pip install --no-build-isolation --no-deps --disable-pip-version-check -e ." diff --git a/examples/solve-groups/pixi.lock b/examples/solve-groups/pixi.lock index e569c4f51..a28ff84d8 100644 --- a/examples/solve-groups/pixi.lock +++ b/examples/solve-groups/pixi.lock @@ -15,7 +15,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-h807b86a_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-h807b86a_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.44.2-h2797004_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.1-h2797004_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda @@ -29,6 +29,10 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/py-rattler-0.2.1-hb6292c7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda + - pypi: https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl#sha256=7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 + - pypi: https://files.pythonhosted.org/packages/c7/10/727155d44c5e04bb08e880668e53079547282e4f950535234e5a80690564/pytest-8.0.0-py3-none-any.whl#sha256=50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6 + - pypi: https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl#sha256=8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 + - pypi: https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl#sha256=b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 osx-64: - conda: https://conda.anaconda.org/conda-forge/noarch/py-rattler-0.2.1-hb6292c7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda @@ -36,7 +40,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2024.2.2-h8857fd0_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libexpat-2.5.0-hf0c8a7f_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libffi-3.4.2-h0d85af4_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.44.2-h92b6c6a_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.45.1-h92b6c6a_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libzlib-1.2.13-h8a1eda9_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/ncurses-6.4-h93d8f39_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/openssl-3.2.1-hd75f5a5_0.conda @@ -46,6 +50,10 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-64/readline-8.2-h9e318b2_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/tk-8.6.13-h1abcd95_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/xz-5.2.6-h775f41a_0.tar.bz2 + - pypi: https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl#sha256=7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 + - pypi: https://files.pythonhosted.org/packages/c7/10/727155d44c5e04bb08e880668e53079547282e4f950535234e5a80690564/pytest-8.0.0-py3-none-any.whl#sha256=50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6 + - pypi: https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl#sha256=8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 + - pypi: https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl#sha256=b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 osx-arm64: - conda: https://conda.anaconda.org/conda-forge/noarch/py-rattler-0.2.1-hb6292c7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda @@ -53,7 +61,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.2.2-hf0a4a13_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.5.0-hb7217d7_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.44.2-h091b4b1_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.1-h091b4b1_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.2.13-h53f4e23_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.4-h463b476_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.2.1-h0d3ecfb_0.conda @@ -63,6 +71,10 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2 + - pypi: https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl#sha256=7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 + - pypi: https://files.pythonhosted.org/packages/c7/10/727155d44c5e04bb08e880668e53079547282e4f950535234e5a80690564/pytest-8.0.0-py3-none-any.whl#sha256=50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6 + - pypi: https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl#sha256=8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 + - pypi: 
https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl#sha256=b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 win-64: - conda: https://conda.anaconda.org/conda-forge/noarch/py-rattler-0.2.1-hb6292c7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda @@ -70,7 +82,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/ca-certificates-2024.2.2-h56e8100_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libexpat-2.5.0-h63175ca_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.4.2-h8ffe710_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.44.2-hcfcfb64_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.45.1-hcfcfb64_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.2.13-hcfcfb64_5.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.2.1-hcfcfb64_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/py_rattler-0.2.1-py312hfccd98a_0.conda @@ -82,6 +94,11 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.38.33130-h82b7239_18.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vs2015_runtime-14.38.33130-hcb4865c_18.conda - conda: https://conda.anaconda.org/conda-forge/win-64/xz-5.2.6-h8d14728_0.tar.bz2 + - pypi: https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl#sha256=7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 + - pypi: https://files.pythonhosted.org/packages/c7/10/727155d44c5e04bb08e880668e53079547282e4f950535234e5a80690564/pytest-8.0.0-py3-none-any.whl#sha256=50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6 + - pypi: https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl#sha256=4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + - pypi: https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl#sha256=8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 + - pypi: https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl#sha256=b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 max-py310: channels: - url: https://conda.anaconda.org/conda-forge/ @@ -96,16 +113,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-h807b86a_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-h807b86a_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.44.2-h2797004_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.1-h2797004_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.4-h59595ed_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.2.1-hd590300_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/linux-64/pydantic-core-2.16.2-py310hcb5633a_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pydantic-core-2.16.2-py310hcb5633a_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.10.0-h543edf9_3_cpython.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.10-4_cp310.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/sqlite-3.44.2-h2c6b66d_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/sqlite-3.45.1-h2c6b66d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/annotated-types-0.6.0-pyhd8ed1ab_0.conda @@ -122,15 +139,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-64/bzip2-1.0.8-h10d778d_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2024.2.2-h8857fd0_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libffi-3.4.2-h0d85af4_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.44.2-h92b6c6a_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.45.1-h92b6c6a_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libzlib-1.2.13-h8a1eda9_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/ncurses-6.4-h93d8f39_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/openssl-3.2.1-hd75f5a5_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-64/pydantic-core-2.16.2-py310h54baaa9_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/pydantic-core-2.16.2-py310h54baaa9_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/python-3.10.0-h38b4d05_3_cpython.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/osx-64/python_abi-3.10-4_cp310.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/readline-8.2-h9e318b2_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-64/sqlite-3.44.2-h7461747_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/sqlite-3.45.1-h7461747_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/tk-8.6.13-h1abcd95_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/xz-5.2.6-h775f41a_0.tar.bz2 osx-arm64: @@ -142,15 +159,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h93a5062_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.2.2-hf0a4a13_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.44.2-h091b4b1_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.1-h091b4b1_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.2.13-h53f4e23_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.4-h463b476_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.2.1-h0d3ecfb_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pydantic-core-2.16.2-py310hd442715_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pydantic-core-2.16.2-py310hd442715_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.10.0-h43b31ca_3_cpython.tar.bz2 - 
conda: https://conda.anaconda.org/conda-forge/osx-arm64/python_abi-3.10-4_cp310.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/sqlite-3.44.2-hf2abe2d_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/sqlite-3.45.1-hf2abe2d_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2 win-64: @@ -162,13 +179,13 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-hcfcfb64_5.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ca-certificates-2024.2.2-h56e8100_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.4.2-h8ffe710_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.44.2-hcfcfb64_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.45.1-hcfcfb64_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.2.13-hcfcfb64_5.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.2.1-hcfcfb64_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/pydantic-core-2.16.2-py310h87d50f1_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/pydantic-core-2.16.2-py310h87d50f1_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.10.0-hcf16a7b_3_cpython.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/win-64/python_abi-3.10-4_cp310.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/sqlite-3.44.2-hcfcfb64_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/sqlite-3.45.1-hcfcfb64_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h5226925_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.22621.0-h57928b3_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-hcf57466_18.conda @@ -189,7 +206,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-h807b86a_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-h807b86a_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.44.2-h2797004_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.1-h2797004_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.4-h59595ed_2.conda @@ -198,18 +215,24 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.10.0-h543edf9_3_cpython.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.10-4_cp310.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/sqlite-3.44.2-h2c6b66d_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/sqlite-3.45.1-h2c6b66d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/py-rattler-0.2.1-hb6292c7_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda + - pypi: https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl#sha256=939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc + - pypi: https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl#sha256=7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 + - pypi: https://files.pythonhosted.org/packages/b8/9a/5028fd52db10e600f1c4674441b968cf2ea4959085bfb5b99fb1250e5f68/exceptiongroup-1.2.0-py3-none-any.whl#sha256=4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 + - pypi: https://files.pythonhosted.org/packages/c7/10/727155d44c5e04bb08e880668e53079547282e4f950535234e5a80690564/pytest-8.0.0-py3-none-any.whl#sha256=50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6 + - pypi: https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl#sha256=8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 + - pypi: https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl#sha256=b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 osx-64: - conda: https://conda.anaconda.org/conda-forge/noarch/py-rattler-0.2.1-hb6292c7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/bzip2-1.0.8-h10d778d_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2024.2.2-h8857fd0_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libffi-3.4.2-h0d85af4_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.44.2-h92b6c6a_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.45.1-h92b6c6a_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libzlib-1.2.13-h8a1eda9_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/ncurses-6.4-h93d8f39_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/openssl-3.2.1-hd75f5a5_0.conda @@ -217,16 +240,22 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-64/python-3.10.0-h38b4d05_3_cpython.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/osx-64/python_abi-3.10-4_cp310.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/readline-8.2-h9e318b2_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-64/sqlite-3.44.2-h7461747_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/sqlite-3.45.1-h7461747_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/tk-8.6.13-h1abcd95_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/xz-5.2.6-h775f41a_0.tar.bz2 + - pypi: https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl#sha256=939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc + - pypi: https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl#sha256=7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 + - pypi: 
https://files.pythonhosted.org/packages/b8/9a/5028fd52db10e600f1c4674441b968cf2ea4959085bfb5b99fb1250e5f68/exceptiongroup-1.2.0-py3-none-any.whl#sha256=4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 + - pypi: https://files.pythonhosted.org/packages/c7/10/727155d44c5e04bb08e880668e53079547282e4f950535234e5a80690564/pytest-8.0.0-py3-none-any.whl#sha256=50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6 + - pypi: https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl#sha256=8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 + - pypi: https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl#sha256=b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 osx-arm64: - conda: https://conda.anaconda.org/conda-forge/noarch/py-rattler-0.2.1-hb6292c7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h93a5062_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.2.2-hf0a4a13_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.44.2-h091b4b1_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.1-h091b4b1_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.2.13-h53f4e23_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.4-h463b476_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.2.1-h0d3ecfb_0.conda @@ -234,28 +263,41 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.10.0-h43b31ca_3_cpython.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python_abi-3.10-4_cp310.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/sqlite-3.44.2-hf2abe2d_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/sqlite-3.45.1-hf2abe2d_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2 + - pypi: https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl#sha256=939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc + - pypi: https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl#sha256=7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 + - pypi: https://files.pythonhosted.org/packages/b8/9a/5028fd52db10e600f1c4674441b968cf2ea4959085bfb5b99fb1250e5f68/exceptiongroup-1.2.0-py3-none-any.whl#sha256=4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 + - pypi: https://files.pythonhosted.org/packages/c7/10/727155d44c5e04bb08e880668e53079547282e4f950535234e5a80690564/pytest-8.0.0-py3-none-any.whl#sha256=50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6 + - pypi: 
https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl#sha256=8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 + - pypi: https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl#sha256=b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 win-64: - conda: https://conda.anaconda.org/conda-forge/noarch/py-rattler-0.2.1-hb6292c7_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-hcfcfb64_5.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ca-certificates-2024.2.2-h56e8100_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.4.2-h8ffe710_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.44.2-hcfcfb64_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.45.1-hcfcfb64_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.2.13-hcfcfb64_5.conda - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.2.1-hcfcfb64_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/py_rattler-0.2.1-py310h87d50f1_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.10.0-hcf16a7b_3_cpython.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/win-64/python_abi-3.10-4_cp310.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/sqlite-3.44.2-hcfcfb64_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/sqlite-3.45.1-hcfcfb64_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h5226925_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.22621.0-h57928b3_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-hcf57466_18.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.38.33130-h82b7239_18.conda - conda: https://conda.anaconda.org/conda-forge/win-64/vs2015_runtime-14.38.33130-hcb4865c_18.conda - conda: https://conda.anaconda.org/conda-forge/win-64/xz-5.2.6-h8d14728_0.tar.bz2 + - pypi: https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl#sha256=939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc + - pypi: https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl#sha256=7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 + - pypi: https://files.pythonhosted.org/packages/b8/9a/5028fd52db10e600f1c4674441b968cf2ea4959085bfb5b99fb1250e5f68/exceptiongroup-1.2.0-py3-none-any.whl#sha256=4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 + - pypi: https://files.pythonhosted.org/packages/c7/10/727155d44c5e04bb08e880668e53079547282e4f950535234e5a80690564/pytest-8.0.0-py3-none-any.whl#sha256=50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6 + - pypi: https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl#sha256=4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + - pypi: https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl#sha256=8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 
+ - pypi: https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl#sha256=b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 packages: - kind: conda name: _libgcc_mutex @@ -300,6 +342,8 @@ packages: - typing-extensions >=4.0.0 license: MIT license_family: MIT + purls: + - pkg:pypi/annotated-types size: 17026 timestamp: 1696634393637 - kind: conda @@ -404,6 +448,26 @@ packages: license: ISC size: 155725 timestamp: 1706844034242 +- kind: pypi + name: colorama + version: 0.4.6 + url: https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl#sha256=4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + sha256: 4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + requires_python: '!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7' +- kind: pypi + name: exceptiongroup + version: 1.2.0 + url: https://files.pythonhosted.org/packages/b8/9a/5028fd52db10e600f1c4674441b968cf2ea4959085bfb5b99fb1250e5f68/exceptiongroup-1.2.0-py3-none-any.whl#sha256=4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 + sha256: 4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 + requires_dist: + - pytest >=6 ; extra == 'test' + requires_python: '>=3.7' +- kind: pypi + name: iniconfig + version: 2.0.0 + url: https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl#sha256=b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 + sha256: b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 + requires_python: '>=3.7' - kind: conda name: ld_impl_linux-64 version: '2.40' @@ -586,59 +650,59 @@ packages: timestamp: 1697359010159 - kind: conda name: libsqlite - version: 3.44.2 + version: 3.45.1 build: h091b4b1_0 subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.44.2-h091b4b1_0.conda - sha256: f0dc2fe69eddb4bab72ff6bb0da51d689294f466ee1b01e80ced1e7878a21aa5 - md5: d7e1af696cfadec251a0abdd7b79ed77 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.1-h091b4b1_0.conda + sha256: 64befc456a38907d1334fb58eb604a96625d3a23a2f34fbd203e0b307a4a141e + md5: a153a40a253962373b5330eb9d182da9 depends: - libzlib >=1.2.13,<1.3.0a0 license: Unlicense - size: 815254 - timestamp: 1700863572318 + size: 824677 + timestamp: 1707495428497 - kind: conda name: libsqlite - version: 3.44.2 + version: 3.45.1 build: h2797004_0 subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.44.2-h2797004_0.conda - sha256: ee2c4d724a3ed60d5b458864d66122fb84c6ce1df62f735f90d8db17b66cd88a - md5: 3b6a9f225c3dbe0d24f4fedd4625c5bf + url: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.1-h2797004_0.conda + sha256: 1b379d1c652b25d0540251d422ef767472e768fd36b77261045e97f9ba6d3faa + md5: fc4ccadfbf6d4784de88c41704792562 depends: - libgcc-ng >=12 - libzlib >=1.2.13,<1.3.0a0 license: Unlicense - size: 845830 - timestamp: 1700863204572 + size: 859346 + timestamp: 1707495156652 - kind: conda name: libsqlite - version: 3.44.2 + version: 3.45.1 build: h92b6c6a_0 subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.44.2-h92b6c6a_0.conda - sha256: 8a317d2aa6352feba951ca09d5bf34f565f9dd10bb14ff842b8650baa321d781 - md5: d4419f90019e6a2b152cd4d32f73a82f + url: 
https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.45.1-h92b6c6a_0.conda + sha256: d65ce7093ecf5884b241a5ca8d26f80d21eaebf14ca67923b50c249f47a84cf9 + md5: e451d14a5412cdc68be50493df251f55 depends: - libzlib >=1.2.13,<1.3.0a0 license: Unlicense - size: 891089 - timestamp: 1700863475542 + size: 902313 + timestamp: 1707495366004 - kind: conda name: libsqlite - version: 3.44.2 + version: 3.45.1 build: hcfcfb64_0 subdir: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.44.2-hcfcfb64_0.conda - sha256: 25bfcf79ec863c2c0f0b3599981e2eac57efc5302faf2bb84f68c3f0faa55d1c - md5: 4a5f5ab56cbf3ccd08d71a1168061213 + url: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.45.1-hcfcfb64_0.conda + sha256: e1010f4ac7b056d85d91e6cb6137ef118f920eba88059261689e543780b230df + md5: c583c1d6999b7aa148eff3089e13c44b depends: - ucrt >=10.0.20348.0 - vc >=14.2,<15 - vc14_runtime >=14.29.30139 license: Unlicense - size: 853171 - timestamp: 1700863704859 + size: 870045 + timestamp: 1707495642340 - kind: conda name: libuuid version: 2.38.1 @@ -843,6 +907,23 @@ packages: license_family: Apache size: 2509168 timestamp: 1706636810736 +- kind: pypi + name: packaging + version: '23.2' + url: https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl#sha256=8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 + sha256: 8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 + requires_python: '>=3.7' +- kind: pypi + name: pluggy + version: 1.4.0 + url: https://files.pythonhosted.org/packages/a5/5b/0cc789b59e8cc1bf288b38111d002d8c5917123194d45b29dcdac64723cc/pluggy-1.4.0-py3-none-any.whl#sha256=7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 + sha256: 7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 + requires_dist: + - pre-commit ; extra == 'dev' + - tox ; extra == 'dev' + - pytest ; extra == 'testing' + - pytest-benchmark ; extra == 'testing' + requires_python: '>=3.8' - kind: conda name: py-rattler version: 0.2.1 @@ -1010,16 +1091,19 @@ packages: - typing-extensions >=4.6.1 license: MIT license_family: MIT + purls: + - pkg:pypi/pydantic size: 271768 timestamp: 1707149490082 - kind: conda name: pydantic-core version: 2.16.2 - build: py310h54baaa9_0 + build: py310h54baaa9_1 + build_number: 1 subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/pydantic-core-2.16.2-py310h54baaa9_0.conda - sha256: 89ae81ac9bdb3e3940328bfe4689553bb4796939eb2970788dee583da43c608a - md5: 6e195e5d56fadfa9fd189df2c5853466 + url: https://conda.anaconda.org/conda-forge/osx-64/pydantic-core-2.16.2-py310h54baaa9_1.conda + sha256: 6e3a0b15d5824ac319c7d711f2bbfad57e5046c351968d7a1ac81b69f40c804f + md5: 04ef1f2653b53c0d46cb8f5e8d786e0a depends: - python >=3.10,<3.11.0a0 - python_abi 3.10.* *_cp310 @@ -1028,16 +1112,19 @@ packages: - __osx >=10.12 license: MIT license_family: MIT - size: 1586660 - timestamp: 1706907396112 + purls: + - pkg:pypi/pydantic-core + size: 1587970 + timestamp: 1707302639908 - kind: conda name: pydantic-core version: 2.16.2 - build: py310h87d50f1_0 + build: py310h87d50f1_1 + build_number: 1 subdir: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/pydantic-core-2.16.2-py310h87d50f1_0.conda - sha256: 63c448393381cf246a5c17cd8d8760997410a451cc55e029c3221f37da29d2cc - md5: 5fcb5b018181aad91e630402a1a0cefa + url: https://conda.anaconda.org/conda-forge/win-64/pydantic-core-2.16.2-py310h87d50f1_1.conda + sha256: 
74dbc9f12af2751e21347812e4a9c5b57eefb1e7c9cdd0bad4c7ade4410aa867 + md5: c899e9ccf167d9b143d68d52c6049ae0 depends: - python >=3.10,<3.11.0a0 - python_abi 3.10.* *_cp310 @@ -1047,16 +1134,19 @@ packages: - vc14_runtime >=14.29.30139 license: MIT license_family: MIT - size: 1616615 - timestamp: 1706907875200 + purls: + - pkg:pypi/pydantic-core + size: 1616493 + timestamp: 1707302899550 - kind: conda name: pydantic-core version: 2.16.2 - build: py310hcb5633a_0 + build: py310hcb5633a_1 + build_number: 1 subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/pydantic-core-2.16.2-py310hcb5633a_0.conda - sha256: 1e491ddbfd53a67f0734fb3f4ccd8782c24935549c281cf5a9bcf9c5ce8b9ec4 - md5: 1a6153b4a9a54c7250c163bcc0c1ac66 + url: https://conda.anaconda.org/conda-forge/linux-64/pydantic-core-2.16.2-py310hcb5633a_1.conda + sha256: 01d3eec5b80c0f38df2ac9896975429924425c1d57b616ed186d5cf6f4805aa9 + md5: 0aeba930e4349289a14a6f2ab20024ef depends: - libgcc-ng >=12 - python >=3.10,<3.11.0a0 @@ -1064,16 +1154,19 @@ packages: - typing-extensions >=4.6.0,!=4.7.0 license: MIT license_family: MIT - size: 1657498 - timestamp: 1706907069048 + purls: + - pkg:pypi/pydantic-core + size: 1655610 + timestamp: 1707302072089 - kind: conda name: pydantic-core version: 2.16.2 - build: py310hd442715_0 + build: py310hd442715_1 + build_number: 1 subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/pydantic-core-2.16.2-py310hd442715_0.conda - sha256: a0dae9f763a5f5b9735b4ad8fdc20e3db0aa2a0f04dbb5d45964b92ef2919cae - md5: 441ce6cc8d003038dff595d2726ace69 + url: https://conda.anaconda.org/conda-forge/osx-arm64/pydantic-core-2.16.2-py310hd442715_1.conda + sha256: 60683230326e1c70a377589d82d4f43e774f436f512043b777a0df6326018ea3 + md5: c0dd98bc59c047bec4b4a2b92472a8b8 depends: - python >=3.10,<3.11.0a0 - python >=3.10,<3.11.0a0 *_cpython @@ -1083,8 +1176,32 @@ packages: - __osx >=11.0 license: MIT license_family: MIT - size: 1474830 - timestamp: 1706907541328 + purls: + - pkg:pypi/pydantic-core + size: 1474967 + timestamp: 1707302679943 +- kind: pypi + name: pytest + version: 8.0.0 + url: https://files.pythonhosted.org/packages/c7/10/727155d44c5e04bb08e880668e53079547282e4f950535234e5a80690564/pytest-8.0.0-py3-none-any.whl#sha256=50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6 + sha256: 50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6 + requires_dist: + - iniconfig + - packaging + - pluggy <2.0, >=1.3.0 + - exceptiongroup >=1.0.0rc8 ; python_version < '3.11' + - tomli >=1.0.0 ; python_version < '3.11' + - colorama ; sys_platform == 'win32' + - argcomplete ; extra == 'testing' + - attrs >=19.2.0 ; extra == 'testing' + - hypothesis >=3.56 ; extra == 'testing' + - mock ; extra == 'testing' + - nose ; extra == 'testing' + - pygments >=2.7.2 ; extra == 'testing' + - requests ; extra == 'testing' + - setuptools ; extra == 'testing' + - xmlschema ; extra == 'testing' + requires_python: '>=3.8' - kind: conda name: python version: 3.10.0 @@ -1467,69 +1584,69 @@ packages: timestamp: 1679532707590 - kind: conda name: sqlite - version: 3.44.2 + version: 3.45.1 build: h2c6b66d_0 subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/sqlite-3.44.2-h2c6b66d_0.conda - sha256: bae479520fe770fe11996b4c240923ed097f851fbd2401d55540e551c9dbbef7 - md5: 4f2892c672829693fd978d065db4e8be + url: https://conda.anaconda.org/conda-forge/linux-64/sqlite-3.45.1-h2c6b66d_0.conda + sha256: a7cbde68eff5d2ec9bb1b5f2604a523949048a9b5335588eac2d893fd0dd5200 + md5: 
93acf31b379acebada263b9bce3dc6ed depends: - libgcc-ng >=12 - - libsqlite 3.44.2 h2797004_0 + - libsqlite 3.45.1 h2797004_0 - libzlib >=1.2.13,<1.3.0a0 - ncurses >=6.4,<7.0a0 - readline >=8.2,<9.0a0 license: Unlicense - size: 836378 - timestamp: 1700863215372 + size: 848194 + timestamp: 1707495171927 - kind: conda name: sqlite - version: 3.44.2 + version: 3.45.1 build: h7461747_0 subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/sqlite-3.44.2-h7461747_0.conda - sha256: 120f42ee2b7cee46711693609f8a7e7918befbd370c960332c0ef13ca651c0d8 - md5: ac6662948d2ccf800474dfdf59fb94bc + url: https://conda.anaconda.org/conda-forge/osx-64/sqlite-3.45.1-h7461747_0.conda + sha256: ce0908a02a1965854dde0022f5ba9b986324077ba4835a3c990463ed762e6e8f + md5: 239ff6ffc3ee45898db19e3cbbf40f88 depends: - - libsqlite 3.44.2 h92b6c6a_0 + - libsqlite 3.45.1 h92b6c6a_0 - libzlib >=1.2.13,<1.3.0a0 - ncurses >=6.4,<7.0a0 - readline >=8.2,<9.0a0 license: Unlicense - size: 890038 - timestamp: 1700863497227 + size: 901237 + timestamp: 1707495392094 - kind: conda name: sqlite - version: 3.44.2 + version: 3.45.1 build: hcfcfb64_0 subdir: win-64 - url: https://conda.anaconda.org/conda-forge/win-64/sqlite-3.44.2-hcfcfb64_0.conda - sha256: 77496bb1b15fe40bae1ca9a9841b906b66f212a534e7c4ef7878c82511c2d0e4 - md5: 27ac1a237f0c9964afba717848811ba8 + url: https://conda.anaconda.org/conda-forge/win-64/sqlite-3.45.1-hcfcfb64_0.conda + sha256: e77d529803d11743306b57d871c1f168da0eaa5a405591a4a53139a9a10cda0c + md5: 3c6f2dc59bcde87ee1de006f22ecc40a depends: - - libsqlite 3.44.2 hcfcfb64_0 + - libsqlite 3.45.1 hcfcfb64_0 - ucrt >=10.0.20348.0 - vc >=14.2,<15 - vc14_runtime >=14.29.30139 license: Unlicense - size: 856472 - timestamp: 1700863720976 + size: 872653 + timestamp: 1707495666981 - kind: conda name: sqlite - version: 3.44.2 + version: 3.45.1 build: hf2abe2d_0 subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/sqlite-3.44.2-hf2abe2d_0.conda - sha256: b034405d93e7153f777d52c18fe26120356c568e4ca85626712d633d939a8923 - md5: c98aa8eb8f02260610c5bb981027ba5d + url: https://conda.anaconda.org/conda-forge/osx-arm64/sqlite-3.45.1-hf2abe2d_0.conda + sha256: 9dc20bca83b44cabedefab92b4484fd41bef36b6c73cd3b31506d209ba0d5c2f + md5: 58918f7a593a143c2f305b832c8802f4 depends: - - libsqlite 3.44.2 h091b4b1_0 + - libsqlite 3.45.1 h091b4b1_0 - libzlib >=1.2.13,<1.3.0a0 - ncurses >=6.4,<7.0a0 - readline >=8.2,<9.0a0 license: Unlicense - size: 803166 - timestamp: 1700863604745 + size: 811984 + timestamp: 1707495456736 - kind: conda name: tk version: 8.6.13 @@ -1593,6 +1710,12 @@ packages: license_family: BSD size: 3318875 timestamp: 1699202167581 +- kind: pypi + name: tomli + version: 2.0.1 + url: https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl#sha256=939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc + sha256: 939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc + requires_python: '>=3.7' - kind: conda name: typing-extensions version: 4.9.0 @@ -1621,6 +1744,8 @@ packages: - python >=3.8 license: PSF-2.0 license_family: PSF + purls: + - pkg:pypi/typing-extensions size: 36058 timestamp: 1702176292645 - kind: conda diff --git a/examples/solve-groups/pixi.toml b/examples/solve-groups/pixi.toml index b98ea8a27..ef21673eb 100644 --- a/examples/solve-groups/pixi.toml +++ b/examples/solve-groups/pixi.toml @@ -14,6 +14,9 @@ pydantic = "*" python = ">=3.8" py-rattler = "*" +[feature.min_py38.pypi-dependencies] 
+pytest = "*" + [environments] # The solve-group mixes the dependencies of all features in the group and solves them together. # Both environments should have at most python 3.10, even though `min-py38` environment only diff --git a/src/cli/list.rs b/src/cli/list.rs index 71ea50f89..73ff3f0bf 100644 --- a/src/cli/list.rs +++ b/src/cli/list.rs @@ -10,8 +10,9 @@ use rattler_conda_types::Platform; use rattler_lock::Package; use serde::Serialize; +use crate::lock_file::UpdateLockFileOptions; use crate::project::manifest::EnvironmentName; -use crate::{Project, UpdateLockFileOptions}; +use crate::Project; // an enum to sort by size or name #[derive(clap::ValueEnum, Clone, Debug, Serialize)] diff --git a/src/cli/run.rs b/src/cli/run.rs index 7c8f5a361..731df4e8f 100644 --- a/src/cli/run.rs +++ b/src/cli/run.rs @@ -16,9 +16,10 @@ use crate::task::{ AmbiguousTask, ExecutableTask, FailedToParseShellScript, InvalidWorkingDirectory, SearchEnvironments, TaskAndEnvironment, TaskGraph, TaskName, }; -use crate::{Project, UpdateLockFileOptions}; +use crate::Project; -use crate::environment::LockFileDerivedData; +use crate::lock_file::LockFileDerivedData; +use crate::lock_file::UpdateLockFileOptions; use crate::progress::await_in_progress; use crate::project::manifest::EnvironmentName; use crate::project::Environment; diff --git a/src/consts.rs b/src/consts.rs index d0be1c96b..c61917d32 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -6,6 +6,7 @@ pub const PROJECT_LOCK_FILE: &str = "pixi.lock"; pub const PIXI_DIR: &str = ".pixi"; pub const PREFIX_FILE_NAME: &str = "prefix"; pub const ENVIRONMENTS_DIR: &str = "envs"; +pub const SOLVE_GROUP_ENVIRONMENTS_DIR: &str = "solve-group-envs"; pub const PYPI_DEPENDENCIES: &str = "pypi-dependencies"; pub const DEFAULT_ENVIRONMENT_NAME: &str = "default"; @@ -14,4 +15,6 @@ pub const DEFAULT_FEATURE_NAME: &str = DEFAULT_ENVIRONMENT_NAME; lazy_static! 
{ pub static ref TASK_STYLE: Style = Style::new().blue(); + pub static ref PLATFORM_STYLE: Style = Style::new().yellow(); + pub static ref SOLVE_GROUP_STYLE: Style = Style::new().cyan(); } diff --git a/src/environment.rs b/src/environment.rs index 7d0e7fa68..dfd4a3b2b 100644 --- a/src/environment.rs +++ b/src/environment.rs @@ -1,51 +1,28 @@ -use miette::{Context, IntoDiagnostic}; -use rip::resolve::solve_options::SDistResolution; +use miette::IntoDiagnostic; -use crate::lock_file::{resolve_pypi, LockedCondaPackages, LockedPypiPackages}; -use crate::project::virtual_packages::get_minimal_virtual_packages; -use crate::project::{Dependencies, SolveGroup}; use crate::{ - config, consts, install, install_pypi, lock_file, - lock_file::{ - load_lock_file, verify_environment_satisfiability, verify_platform_satisfiability, - PlatformUnsat, - }, + consts, install, install_pypi, + lock_file::UpdateLockFileOptions, prefix::Prefix, - progress::{self, global_multi_progress}, + progress::{self}, project::{ manifest::{EnvironmentName, SystemRequirements}, virtual_packages::verify_current_platform_has_required_virtual_packages, - Environment, + Environment, GroupedEnvironment, GroupedEnvironmentName, }, - repodata::fetch_sparse_repodata_targets, - utils::BarrierCell, - Project, SpecType, + Project, }; -use futures::future::Either; -use futures::stream::FuturesUnordered; -use futures::{FutureExt, StreamExt, TryFutureExt}; -use indexmap::{IndexMap, IndexSet}; -use indicatif::ProgressBar; -use itertools::Itertools; -use rattler::install::{PythonInfo, Transaction}; -use rattler::package_cache::PackageCache; -use rattler_conda_types::{ - Channel, GenericVirtualPackage, MatchSpec, PackageName, Platform, PrefixRecord, RepoDataRecord, +use indexmap::IndexMap; +use rattler::{ + install::{PythonInfo, Transaction}, + package_cache::PackageCache, }; -use rattler_lock::{LockFile, Package, PypiPackageData, PypiPackageEnvironmentData}; +use rattler_conda_types::{Channel, Platform, PrefixRecord, RepoDataRecord}; +use rattler_lock::{PypiPackageData, PypiPackageEnvironmentData}; use rattler_repodata_gateway::sparse::SparseRepoData; use reqwest_middleware::ClientWithMiddleware; -use rip::index::PackageDb; -use std::{ - borrow::Cow, - collections::{HashMap, HashSet}, - convert::identity, - future::{ready, Future}, - io::ErrorKind, - path::Path, - sync::Arc, - time::Duration, -}; +use rip::{index::PackageDb, resolve::solve_options::SDistResolution}; +use std::{collections::HashMap, io::ErrorKind, path::Path, sync::Arc}; /// Verify the location of the prefix folder is not changed so the applied prefix path is still valid. /// Errors when there is a file system error or the path does not align with the defined prefix. @@ -54,7 +31,7 @@ pub fn verify_prefix_location_unchanged(prefix_file: &Path) -> miette::Result<() match std::fs::read_to_string(prefix_file) { // Not found is fine as it can be new or backwards compatible. Err(e) if e.kind() == ErrorKind::NotFound => Ok(()), - // Scream the error if we dont know it. + // Scream the error if we don't know it. Err(e) => Err(e).into_diagnostic(), // Check if the path in the file aligns with the current path. Ok(p) if prefix_file.starts_with(&p) => Ok(()), @@ -179,6 +156,7 @@ pub async fn get_up_to_date_prefix( existing_repo_data, lock_file_usage, no_install, + ..UpdateLockFileOptions::default() }) .await?; @@ -190,38 +168,6 @@ pub async fn get_up_to_date_prefix( } } -/// Options to pass to [`Project::up_to_date_lock_file`]. 
-#[derive(Default)] -pub struct UpdateLockFileOptions { - /// Defines what to do if the lock-file is out of date - pub lock_file_usage: LockFileUsage, - - /// Don't install anything to disk. - pub no_install: bool, - - /// Existing repodata that can be used to avoid downloading it again. - pub existing_repo_data: IndexMap<(Channel, Platform), SparseRepoData>, -} - -impl Project { - /// Ensures that the lock-file is up-to-date with the project information. - /// - /// Returns the lock-file and any potential derived data that was computed as part of this - /// operation. - pub async fn up_to_date_lock_file( - &self, - options: UpdateLockFileOptions, - ) -> miette::Result> { - ensure_up_to_date_lock_file( - self, - options.existing_repo_data, - options.lock_file_usage, - options.no_install, - ) - .await - } -} - #[allow(clippy::too_many_arguments)] // TODO: refactor args into struct pub async fn update_prefix_pypi( @@ -316,7 +262,7 @@ impl PythonStatus { /// Updates the environment to contain the packages from the specified lock-file pub async fn update_prefix_conda( - environment_name: &EnvironmentName, + environment_name: GroupedEnvironmentName, prefix: &Prefix, package_cache: Arc, authenticated_client: ClientWithMiddleware, @@ -363,1519 +309,7 @@ pub async fn update_prefix_conda( Ok(PythonStatus::from_transaction(&transaction)) } -/// A struct that holds the lock-file and any potential derived data that was computed when calling -/// `ensure_up_to_date_lock_file`. -pub struct LockFileDerivedData<'p> { - /// The lock-file - pub lock_file: LockFile, - - /// The package cache - pub package_cache: Arc, - - /// Repodata that was fetched - pub repo_data: IndexMap<(Channel, Platform), SparseRepoData>, - - /// A list of prefixes that are up-to-date with the latest conda packages. - pub updated_conda_prefixes: HashMap, (Prefix, PythonStatus)>, - - /// A list of prefixes that have been updated while resolving all dependencies. - pub updated_pypi_prefixes: HashMap, Prefix>, -} - -impl<'p> LockFileDerivedData<'p> { - /// Returns the up-to-date prefix for the given environment. - pub async fn prefix(&mut self, environment: &Environment<'p>) -> miette::Result { - if let Some(prefix) = self.updated_pypi_prefixes.get(environment) { - return Ok(prefix.clone()); - } - - // Get the prefix with the conda packages installed. - let platform = Platform::current(); - let package_db = environment.project().pypi_package_db()?; - let (prefix, python_status) = self.conda_prefix(environment).await?; - let repodata_records = self - .repodata_records(environment, platform) - .unwrap_or_default(); - let pypi_records = self.pypi_records(environment, platform).unwrap_or_default(); - - // Update the prefix with Pypi records - update_prefix_pypi( - environment.name(), - &prefix, - platform, - package_db, - &repodata_records, - &pypi_records, - &python_status, - &environment.system_requirements(), - SDistResolution::default(), - ) - .await?; - - // Store that we updated the environment, so we won't have to do it again. 
- self.updated_pypi_prefixes - .insert(environment.clone(), prefix.clone()); - - Ok(prefix) - } - - fn pypi_records( - &self, - environment: &Environment<'p>, - platform: Platform, - ) -> Option> { - let locked_env = self - .lock_file - .environment(environment.name().as_str()) - .expect("the lock-file should be up-to-date so it should also include the environment"); - locked_env.pypi_packages_for_platform(platform) - } - - fn repodata_records( - &self, - environment: &Environment<'p>, - platform: Platform, - ) -> Option> { - let locked_env = self - .lock_file - .environment(environment.name().as_str()) - .expect("the lock-file should be up-to-date so it should also include the environment"); - locked_env.conda_repodata_records_for_platform(platform).expect("since the lock-file is up to date we should be able to extract the repodata records from it") - } - - async fn conda_prefix( - &mut self, - environment: &Environment<'p>, - ) -> miette::Result<(Prefix, PythonStatus)> { - // If we previously updated this environment, early out. - if let Some((prefix, python_status)) = self.updated_conda_prefixes.get(environment) { - return Ok((prefix.clone(), python_status.clone())); - } - - let prefix = Prefix::new(environment.dir()); - let platform = Platform::current(); - - // Determine the currently installed packages. - let installed_packages = prefix - .find_installed_packages(None) - .await - .with_context(|| { - format!( - "failed to determine the currently installed packages for '{}'", - environment.name(), - ) - })?; - - // Get the locked environment from the lock-file. - let records = self - .repodata_records(environment, platform) - .unwrap_or_default(); - - // Update the prefix with conda packages. - let python_status = update_prefix_conda( - environment.name(), - &prefix, - self.package_cache.clone(), - environment.project().authenticated_client().clone(), - installed_packages, - &records, - platform, - ) - .await?; - - // Store that we updated the environment, so we won't have to do it again. - self.updated_conda_prefixes - .insert(environment.clone(), (prefix.clone(), python_status.clone())); - - Ok((prefix, python_status)) - } -} - -/// A struct that defines which targets are out of date. -struct OutdatedEnvironments<'p> { - conda: HashMap, HashSet>, - pypi: HashMap, HashSet>, -} - -impl<'p> OutdatedEnvironments<'p> { - pub fn from_project_and_lock_file(project: &'p Project, lock_file: &LockFile) -> Self { - let mut outdated_conda: HashMap<_, HashSet<_>> = HashMap::new(); - let mut outdated_pypi: HashMap<_, HashSet<_>> = HashMap::new(); - - for environment in project.environments() { - let platforms = environment.platforms(); - - // Get the locked environment from the environment - let Some(locked_environment) = lock_file.environment(environment.name().as_str()) - else { - tracing::info!( - "environment '{0}' is out of date because it does not exist in the lock-file.", - environment.name().fancy_display() - ); - - outdated_conda - .entry(environment.clone()) - .or_default() - .extend(platforms); - - continue; - }; - - // The locked environment exists, but does it match our project environment? 
- if let Err(unsat) = verify_environment_satisfiability(&environment, &locked_environment) - { - tracing::info!( - "environment '{0}' is out of date because {unsat}", - environment.name().fancy_display() - ); - - outdated_conda - .entry(environment.clone()) - .or_default() - .extend(platforms); - - continue; - } - - // Verify each individual platform - for platform in platforms { - match verify_platform_satisfiability(&environment, &locked_environment, platform) { - Ok(_) => {} - Err(unsat @ PlatformUnsat::UnsatisfiableRequirement(_, _)) => { - tracing::info!( - "the pypi dependencies of environment '{0}' for platform {platform} are out of date because {unsat}", - environment.name().fancy_display() - ); - - outdated_pypi - .entry(environment.clone()) - .or_default() - .insert(platform); - } - Err(unsat) => { - tracing::info!( - "the dependencies of environment '{0}' for platform {platform} are out of date because {unsat}", - environment.name().fancy_display() - ); - - outdated_conda - .entry(environment.clone()) - .or_default() - .insert(platform); - } - } - } - } - - // Determine which solve-groups are out of date. - let mut conda_solve_groups_out_of_date = HashMap::new(); - let mut pypi_solve_groups_out_of_date = HashMap::new(); - for (environment, platforms) in &outdated_conda { - let Some(solve_group) = environment.solve_group() else { - continue; - }; - conda_solve_groups_out_of_date - .entry(solve_group) - .or_insert_with(HashSet::new) - .extend(platforms.iter().copied()); - } - for (environment, platforms) in &outdated_pypi { - let Some(solve_group) = environment.solve_group() else { - continue; - }; - pypi_solve_groups_out_of_date - .entry(solve_group) - .or_insert_with(HashSet::new) - .extend(platforms.iter().copied()); - } - - // Check solve-groups, all environments in the same solve group must share the same - // dependencies. - for solve_group in project.solve_groups() { - for platform in solve_group - .environments() - .flat_map(|env| env.platforms()) - .unique() - { - // Keep track of if any of the package types are out of date - let mut conda_package_mismatch = false; - let mut pypi_package_mismatch = false; - - // Keep track of the packages by name to check for mismatches between environments. - let mut conda_packages_by_name = HashMap::new(); - let mut pypi_packages_by_name = HashMap::new(); - - // Iterate over all environments to compare the packages. - for env in solve_group.environments() { - if outdated_conda - .get(&env) - .and_then(|p| p.get(&platform)) - .is_some() - { - // If the environment is already out-of-date there is no need to check it, - // because the solve-group is already out-of-date. - break; - } - - let Some(locked_env) = lock_file.environment(env.name().as_str()) else { - // If the environment is missing, we already marked it as out of date. 
- continue; - }; - - for package in locked_env.packages(platform).into_iter().flatten() { - match package { - Package::Conda(pkg) => { - match conda_packages_by_name.get(&pkg.package_record().name) { - None => { - conda_packages_by_name.insert( - pkg.package_record().name.clone(), - pkg.url().clone(), - ); - } - Some(url) if pkg.url() != url => { - conda_package_mismatch = true; - } - _ => {} - } - } - Package::Pypi(pkg) => { - match pypi_packages_by_name.get(&pkg.data().package.name) { - None => { - pypi_packages_by_name.insert( - pkg.data().package.name.clone(), - pkg.url().clone(), - ); - } - Some(url) if pkg.url() != url => { - pypi_package_mismatch = true; - } - _ => {} - } - } - } - - // If there is a conda package mismatch there is also a pypi mismatch and we - // can break early. - if conda_package_mismatch { - pypi_package_mismatch = true; - break; - } - } - - // If there is a conda package mismatch there is also a pypi mismatch and we can - // break early. - if conda_package_mismatch { - pypi_package_mismatch = true; - break; - } - } - - // If there is a mismatch there is a mismatch for the entire group - if conda_package_mismatch { - conda_solve_groups_out_of_date - .entry(solve_group.clone()) - .or_default() - .insert(platform); - } - - if pypi_package_mismatch { - pypi_solve_groups_out_of_date - .entry(solve_group.clone()) - .or_default() - .insert(platform); - } - } - } - - // Mark the rest of the environments out of date for all solve groups - for (solve_group, platforms) in conda_solve_groups_out_of_date { - for env in solve_group.environments() { - outdated_conda - .entry(env.clone()) - .or_default() - .extend(platforms.iter().copied()); - } - } - - for (solve_group, platforms) in pypi_solve_groups_out_of_date { - for env in solve_group.environments() { - outdated_pypi - .entry(env.clone()) - .or_default() - .extend(platforms.iter().copied()); - } - } - - // For all targets where conda is out of date, the pypi packages are also out of date. - for (environment, platforms) in outdated_conda.iter() { - outdated_pypi - .entry(environment.clone()) - .or_default() - .extend(platforms.iter().copied()); - } - - Self { - conda: outdated_conda, - pypi: outdated_pypi, - } - } - - /// Returns true if the lock-file is up-to-date with the project. - pub fn is_empty(&self) -> bool { - self.conda.is_empty() && self.pypi.is_empty() - } -} - -type PerEnvironment<'p, T> = HashMap, T>; -type PerEnvironmentAndPlatform<'p, T> = HashMap, HashMap>; -type PerGroupAndPlatform<'p, T> = HashMap, HashMap>; - -type LockedCondaPackagesByName = HashMap; - -#[derive(Default)] -struct UpdateContext<'p> { - /// Repodata that is available to the solve tasks. - repo_data: Arc>, - - /// Repodata records from the lock-file. This contains the records that actually exist in the - /// lock-file. If the lock-file is missing or partially missing then the data also won't exist - /// in this field. - locked_repodata_records: PerEnvironmentAndPlatform<'p, Arc>, - - /// Repodata records from the lock-file. This contains the records that actually exist in the - /// lock-file. If the lock-file is missing or partially missing then the data also won't exist - /// in this field. - locked_pypi_records: PerEnvironmentAndPlatform<'p, Arc>, - - /// Keeps track of all pending conda targets that are being solved. The mapping contains a - /// [`BarrierCell`] that will eventually contain the solved records computed by another task. - /// This allows tasks to wait for the records to be solved before proceeding. 
- solved_repodata_records: - PerEnvironmentAndPlatform<'p, Arc>>>, - - /// Keeps track of all pending prefix updates. This only tracks the conda updates to a prefix, - /// not whether the pypi packages have also been updated. - instantiated_conda_prefixes: PerEnvironment<'p, Arc>>, - - /// Keeps track of all pending conda targets that are being solved. The mapping contains a - /// [`BarrierCell`] that will eventually contain the solved records computed by another task. - /// This allows tasks to wait for the records to be solved before proceeding. - solved_pypi_records: PerEnvironmentAndPlatform<'p, Arc>>>, - - grouped_solved_repodata_records: - PerGroupAndPlatform<'p, Arc>>>, -} - -impl<'p> UpdateContext<'p> { - /// Returns a future that will resolve to the solved repodata records for the given environment - /// or `None` if the records do not exist and are also not in the process of being updated. - pub fn get_latest_repodata_records( - &self, - environment: &Environment<'_>, - platform: Platform, - ) -> Option>>> { - self.solved_repodata_records - .get(environment) - .and_then(|records| records.get(&platform)) - .map(|records| { - let records = records.clone(); - Either::Left(async move { records.wait().await.clone() }) - }) - .or_else(|| { - self.locked_repodata_records - .get(environment) - .and_then(|records| records.get(&platform)) - .cloned() - .map(ready) - .map(Either::Right) - }) - } - - /// Takes the latest repodata records for the given environment and platform. Returns `None` if - /// neither the records exist nor are in the process of being updated. - /// - /// This function panics if the repodata records are still pending. - pub fn take_latest_repodata_records( - &mut self, - environment: &Environment<'p>, - platform: Platform, - ) -> Option> { - self.solved_repodata_records - .get_mut(environment) - .and_then(|records| records.remove(&platform)) - .map(|cell| { - Arc::into_inner(cell) - .expect("records must not be shared") - .into_inner() - .expect("records must be available") - }) - .or_else(|| { - self.locked_repodata_records - .get_mut(environment) - .and_then(|records| records.remove(&platform)) - }) - .map(|records| Arc::try_unwrap(records).unwrap_or_else(|arc| (*arc).clone())) - } - - /// Takes the latest pypi records for the given environment and platform. Returns `None` if - /// neither the records exist nor are in the process of being updated. - /// - /// This function panics if the repodata records are still pending. - pub fn take_latest_pypi_records( - &mut self, - environment: &Environment<'p>, - platform: Platform, - ) -> Option> { - self.solved_pypi_records - .get_mut(environment) - .and_then(|records| records.remove(&platform)) - .map(|cell| { - Arc::into_inner(cell) - .expect("records must not be shared") - .into_inner() - .expect("records must be available") - }) - .or_else(|| { - self.locked_pypi_records - .get_mut(environment) - .and_then(|records| records.remove(&platform)) - }) - .map(|records| Arc::try_unwrap(records).unwrap_or_else(|arc| (*arc).clone())) - } - - /// Get a list of conda prefixes that have been updated. 
- pub fn take_instantiated_conda_prefixes( - &mut self, - ) -> HashMap, (Prefix, PythonStatus)> { - self.instantiated_conda_prefixes - .drain() - .map(|(env, cell)| { - let prefix = Arc::into_inner(cell) - .expect("prefixes must not be shared") - .into_inner() - .expect("prefix must be available"); - (env, prefix) - }) - .collect() - } - - /// Returns a future that will resolve to the solved repodata records for the given environment - /// or `None` if no task was spawned to instantiate the prefix. - pub fn get_conda_prefix( - &self, - environment: &Environment<'p>, - ) -> Option> { - let cell = self.instantiated_conda_prefixes.get(environment)?.clone(); - Some(async move { cell.wait().await.clone() }) - } -} - -/// Ensures that the lock-file is up-to-date with the project. -/// -/// This function will return a [`LockFileDerivedData`] struct that contains the lock-file and any -/// potential derived data that was computed as part of this function. The derived data might be -/// usable by other functions to avoid recomputing the same data. -/// -/// This function starts by checking if the lock-file is up-to-date. If it is not up-to-date it will -/// construct a task graph of all the work that needs to be done to update the lock-file. The tasks -/// are awaited in a specific order to make sure that we can start instantiating prefixes as soon as -/// possible. -async fn ensure_up_to_date_lock_file( - project: &Project, - existing_repo_data: IndexMap<(Channel, Platform), SparseRepoData>, - lock_file_usage: LockFileUsage, - no_install: bool, -) -> miette::Result> { - let lock_file = load_lock_file(project).await?; - let current_platform = Platform::current(); - let package_cache = Arc::new(PackageCache::new(config::get_cache_dir()?.join("pkgs"))); - - // should we check the lock-file in the first place? - if !lock_file_usage.should_check_if_out_of_date() { - tracing::info!("skipping check if lock-file is up-to-date"); - - return Ok(LockFileDerivedData { - lock_file, - package_cache, - repo_data: existing_repo_data, - updated_conda_prefixes: Default::default(), - updated_pypi_prefixes: Default::default(), - }); - } - - // Check which environments are out of date. - let outdated = OutdatedEnvironments::from_project_and_lock_file(project, &lock_file); - if outdated.is_empty() { - tracing::info!("the lock-file is up-to-date"); - - // If no-environment is outdated we can return early. - return Ok(LockFileDerivedData { - lock_file, - package_cache, - repo_data: existing_repo_data, - updated_conda_prefixes: Default::default(), - updated_pypi_prefixes: Default::default(), - }); - } - - // If the lock-file is out of date, but we're not allowed to update it, we should exit. - if !lock_file_usage.allows_lock_file_updates() { - miette::bail!("lock-file not up-to-date with the project"); - } - - // Determine the repodata that we're going to need to solve the environments. For all outdated - // conda targets we take the union of all the channels that are used by the environment. - // - // The NoArch platform is always added regardless of whether it is explicitly used by the - // environment. - let mut fetch_targets = IndexSet::new(); - for (environment, platforms) in outdated.conda.iter() { - for channel in environment.channels() { - for platform in platforms { - fetch_targets.insert((channel.clone(), *platform)); - } - fetch_targets.insert((channel.clone(), Platform::NoArch)); - } - } - - // Fetch all the repodata that we need to solve the environments. 
- let mut repo_data = fetch_sparse_repodata_targets( - fetch_targets - .into_iter() - .filter(|target| !existing_repo_data.contains_key(target)), - project.authenticated_client(), - ) - .await?; - - // Add repo data that was already fetched - repo_data.extend(existing_repo_data); - - // Extract the current conda records from the lock-file - // TODO: Should we parallelize this? Measure please. - let locked_repodata_records = project - .environments() - .into_iter() - .flat_map(|env| { - lock_file - .environment(env.name().as_str()) - .into_iter() - .map(move |locked_env| { - locked_env.conda_repodata_records().map(|records| { - ( - env.clone(), - records - .into_iter() - .map(|(platform, records)| (platform, Arc::new(records))) - .collect(), - ) - }) - }) - }) - .collect::>, _>>() - .into_diagnostic()?; - - let locked_pypi_records = project - .environments() - .into_iter() - .flat_map(|env| { - lock_file - .environment(env.name().as_str()) - .into_iter() - .map(move |locked_env| { - ( - env.clone(), - locked_env - .pypi_packages() - .into_iter() - .map(|(platform, records)| (platform, Arc::new(records))) - .collect(), - ) - }) - }) - .collect::>>(); - - let mut context = UpdateContext { - repo_data: Arc::new(repo_data), - locked_repodata_records, - locked_pypi_records, - solved_repodata_records: HashMap::new(), - instantiated_conda_prefixes: HashMap::new(), - solved_pypi_records: HashMap::new(), - grouped_solved_repodata_records: HashMap::new(), - }; - - // This will keep track of all outstanding tasks that we need to wait for. All tasks are added - // to this list after they are spawned. This function blocks until all pending tasks have either - // completed or errored. - let mut pending_futures = FuturesUnordered::new(); - - // Spawn tasks for all the conda targets that are out of date. - for (environment, platforms) in outdated.conda { - // Turn the platforms into an IndexSet, so we have a little control over the order in which - // we solve the platforms. We want to solve the current platform first, so we can start - // instantiating prefixes if we have to. - let mut ordered_platforms = platforms.into_iter().collect::>(); - if let Some(current_platform_index) = ordered_platforms.get_index_of(¤t_platform) { - ordered_platforms.move_index(current_platform_index, 0); - } - - // Determine the source of the solve information - let source = GroupedEnvironment::from(environment.clone()); - - for platform in ordered_platforms { - // Is there an existing pending task to solve the group? - let group_solve_records = if let Some(cell) = context - .grouped_solved_repodata_records - .get(&source) - .and_then(|platforms| platforms.get(&platform)) - { - // Yes, we can reuse the existing cell. - cell.clone() - } else { - // No, we need to spawn a task to update for the entire solve group. - // - // Determine the existing records for the group and platform. If there are multiple - // environments that contain the same packages (because they were previously not in - // the same solve group), we only take the latest version of the package. 
- let mut existing_records = HashMap::new(); - for env in source.environments() { - for record in context - .locked_repodata_records - .get(&env) - .and_then(|env| env.get(&platform)) - .into_iter() - .flat_map(|records| records.iter()) - { - match existing_records.get(&record.package_record.name) { - None => { - existing_records - .insert(record.package_record.name.clone(), record.clone()); - } - Some(existing) - if existing.package_record.version - < record.package_record.version => - { - existing_records - .insert(record.package_record.name.clone(), record.clone()); - } - _ => {} - } - } - } - let existing_records = existing_records.into_values().collect_vec(); - - // Spawn a task to solve the group. - let group_solve_task = spawn_solve_conda_environment_task( - source.clone(), - existing_records, - context.repo_data.clone(), - platform, - ) - .boxed_local(); - - // Store the task so we can poll it later. - pending_futures.push(group_solve_task); - - // Create an entry that can be used by other tasks to wait for the result. - let cell = Arc::new(BarrierCell::new()); - let previous_cell = context - .grouped_solved_repodata_records - .entry(source.clone()) - .or_default() - .insert(platform, cell.clone()); - assert!( - previous_cell.is_none(), - "a cell has already been added to update conda records" - ); - - cell - }; - - // Spawn a task to extract the records from the group solve task. - let records_future = - spawn_extract_conda_environment_task(environment.clone(), platform, async move { - group_solve_records.wait().await.clone() - }) - .boxed_local(); - - pending_futures.push(records_future); - let previous_cell = context - .solved_repodata_records - .entry(environment.clone()) - .or_default() - .insert(platform, Arc::default()); - assert!( - previous_cell.is_none(), - "a cell has already been added to update conda records" - ); - } - } - - // Spawn tasks to instantiate prefixes that we need to be able to solve Pypi packages. - // - // Solving Pypi packages requires a python interpreter to be present in the prefix, therefore we - // first need to make sure we have conda packages available, then we can instantiate the - // prefix with at least the required conda packages (including a python interpreter) and then - // we can solve the Pypi packages using the installed interpreter. - // - // We only need to instantiate the prefix for the current platform. - for (environment, platforms) in outdated.pypi.iter() { - // Only instantiate a prefix if any of the platforms actually contain pypi dependencies. If - // there are no pypi-dependencies than solving is also not required and thus a prefix is - // also not required. - if !platforms - .iter() - .any(|p| !environment.pypi_dependencies(Some(*p)).is_empty()) - { - continue; - } - - // If we are not allowed to install, we can't instantiate a prefix. - if no_install { - miette::bail!("Cannot update pypi dependencies without first installing a conda prefix that includes python."); - } - - // Construct a future that will resolve when we have the repodata available for the current - // platform for this environment. 
- let records_future = context - .get_latest_repodata_records(environment, current_platform) - .expect("conda records should be available now or in the future"); - - // Spawn a task to instantiate the environment - let environment_name = environment.name().clone(); - let pypi_env_task = - spawn_create_prefix_task(environment.clone(), package_cache.clone(), records_future) - .map_err(move |e| { - e.context(format!( - "failed to instantiate a prefix for '{}'", - environment_name - )) - }) - .boxed_local(); - - pending_futures.push(pypi_env_task); - context - .instantiated_conda_prefixes - .insert(environment.clone(), Arc::new(BarrierCell::new())); - } - - // Spawn tasks to update the pypi packages. - for (environment, platform) in outdated - .pypi - .into_iter() - .flat_map(|(env, platforms)| platforms.into_iter().map(move |p| (env.clone(), p))) - { - let dependencies = environment.pypi_dependencies(Some(platform)); - let pypi_solve_task = if dependencies.is_empty() { - // If there are no pypi dependencies we can skip solving the pypi packages. - Either::Left(ready(Ok(TaskResult::PypiSolved( - environment.name().clone(), - platform, - Vec::new(), - )))) - } else { - // Construct a future that will resolve when we have the repodata available - let repodata_future = context - .get_latest_repodata_records(&environment, platform) - .expect("conda records should be available now or in the future"); - - // Construct a future that will resolve when we have the conda prefix available - let prefix_future = context - .get_conda_prefix(&environment) - .expect("prefix should be available now or in the future"); - - // Spawn a task to solve the pypi environment - let pypi_solve_future = spawn_solve_pypi_task( - environment.clone(), - platform, - repodata_future, - prefix_future, - SDistResolution::default(), - ); - - Either::Right(pypi_solve_future) - }; - - pending_futures.push(pypi_solve_task.boxed_local()); - let previous_cell = context - .solved_pypi_records - .entry(environment) - .or_default() - .insert(platform, Arc::new(BarrierCell::new())); - assert!( - previous_cell.is_none(), - "a cell has already been added to update pypi records" - ); - } - - let top_level_progress = - global_multi_progress().add(ProgressBar::new(pending_futures.len() as u64)); - top_level_progress.set_style(indicatif::ProgressStyle::default_bar() - .template("{spinner:.cyan} {prefix:20!} [{elapsed_precise}] [{bar:40!.bright.yellow/dim.white}] {pos:>4}/{len:4} {wide_msg:.dim}").unwrap() - .progress_chars("━━╾─")); - top_level_progress.enable_steady_tick(Duration::from_millis(50)); - top_level_progress.set_prefix("updating lock-file"); - - // Iterate over all the futures we spawned and wait for them to complete. - // - // The spawned futures each result either in an error or in a `TaskResult`. The `TaskResult` - // contains the result of the task. The results are stored into [`BarrierCell`]s which allows - // other tasks to respond to the data becoming available. - // - // A loop on the main task is used versus individually spawning all tasks for two reasons: - // - // 1. This provides some control over when data is polled and broadcasted to other tasks. No - // data is broadcasted until we start polling futures here. This reduces the risk of - // race-conditions where data has already been broadcasted before a task subscribes to it. - // 2. The futures stored in `pending_futures` do not necessarily have to be `'static`. Which - // makes them easier to work with. 
- while let Some(result) = pending_futures.next().await { - top_level_progress.inc(1); - match result? { - TaskResult::CondaGroupSolved(group_name, platform, records) => { - let group = match &group_name { - GroupedEnvironmentName::Group(name) => GroupedEnvironment::Group( - project.solve_group(name).expect("solve group should exist"), - ), - GroupedEnvironmentName::Environment(name) => GroupedEnvironment::Environment( - project.environment(name).expect("environment should exist"), - ), - }; - - context - .grouped_solved_repodata_records - .get_mut(&group) - .expect("the entry for this environment should exist") - .get_mut(&platform) - .expect("the entry for this platform should exist") - .set(Arc::new(records)) - .expect("records should not be solved twice"); - - match group_name { - GroupedEnvironmentName::Group(group_name) => { - tracing::info!( - "solved conda package for solve group '{}' '{}'", - group_name, - platform - ); - } - GroupedEnvironmentName::Environment(env_name) => { - tracing::info!( - "solved conda package for environment '{}' '{}'", - env_name, - platform - ); - } - } - } - TaskResult::CondaSolved(environment, platform, records) => { - let environment = project - .environment(&environment) - .expect("environment should exist"); - - context - .solved_repodata_records - .get_mut(&environment) - .expect("the entry for this environment should exist") - .get_mut(&platform) - .expect("the entry for this platform should exist") - .set(Arc::new(records)) - .expect("records should not be solved twice"); - - tracing::info!( - "extracted conda packages for '{}' '{}'", - environment.name().fancy_display(), - platform - ); - } - TaskResult::CondaPrefixUpdated(environment, prefix, python_status) => { - let environment = project - .environment(&environment) - .expect("environment should exist"); - - context - .instantiated_conda_prefixes - .get_mut(&environment) - .expect("the entry for this environment should exists") - .set((prefix, *python_status)) - .expect("prefix should not be instantiated twice"); - - tracing::info!( - "updated conda packages in the '{}' prefix", - environment.name().fancy_display() - ); - } - TaskResult::PypiSolved(environment, platform, records) => { - let environment = project - .environment(&environment) - .expect("environment should exist"); - - context - .solved_pypi_records - .get_mut(&environment) - .expect("the entry for this environment should exist") - .get_mut(&platform) - .expect("the entry for this platform should exist") - .set(Arc::new(records)) - .expect("records should not be solved twice"); - - tracing::info!( - "solved pypi packages for '{}' '{}'", - environment.name().fancy_display(), - platform - ); - } - } - } - - // Construct a new lock-file containing all the updated or old records. - let mut builder = LockFile::builder(); - - // Iterate over all environments and add their records to the lock-file. 
- for environment in project.environments() { - builder.set_channels( - environment.name().as_str(), - environment - .channels() - .into_iter() - .map(|channel| rattler_lock::Channel::from(channel.base_url().to_string())), - ); - - for platform in environment.platforms() { - if let Some(records) = context.take_latest_repodata_records(&environment, platform) { - for record in records { - builder.add_conda_package(environment.name().as_str(), platform, record.into()); - } - } - if let Some(records) = context.take_latest_pypi_records(&environment, platform) { - for (pkg_data, pkg_env_data) in records { - builder.add_pypi_package( - environment.name().as_str(), - platform, - pkg_data, - pkg_env_data, - ); - } - } - } - } - - // Store the lock file - let lock_file = builder.finish(); - lock_file - .to_path(&project.lock_file_path()) - .into_diagnostic() - .context("failed to write lock-file to disk")?; - - top_level_progress.finish_and_clear(); - - Ok(LockFileDerivedData { - lock_file, - package_cache, - updated_conda_prefixes: context.take_instantiated_conda_prefixes(), - updated_pypi_prefixes: HashMap::default(), - repo_data: Arc::into_inner(context.repo_data) - .expect("repo data should not be shared anymore"), - }) -} - -/// Represents data that is sent back from a task. This is used to communicate the result of a task -/// back to the main task which will forward the information to other tasks waiting for results. -enum TaskResult { - CondaGroupSolved(GroupedEnvironmentName, Platform, LockedCondaPackagesByName), - CondaSolved(EnvironmentName, Platform, LockedCondaPackages), - CondaPrefixUpdated(EnvironmentName, Prefix, Box), - PypiSolved( - EnvironmentName, - Platform, - Vec<(PypiPackageData, PypiPackageEnvironmentData)>, - ), -} - -/// A task that solves the conda dependencies for a given environment. -async fn spawn_solve_conda_environment_task( - group: GroupedEnvironment<'_>, - existing_repodata_records: Vec, - sparse_repo_data: Arc>, - platform: Platform, -) -> miette::Result { - // Get the dependencies for this platform - let dependencies = group.dependencies(None, Some(platform)); - - // Get the virtual packages for this platform - let virtual_packages = group.virtual_packages(platform); - - // Get the environment name - let group_name = group.name(); - - // The list of channels and platforms we need for this task - let channels = group.channels().into_iter().cloned().collect_vec(); - - // Capture local variables - let sparse_repo_data = sparse_repo_data.clone(); - - // Whether there are pypi dependencies, and we should fetch purls. - let has_pypi_dependencies = group.has_pypi_dependencies(); - - tokio::spawn(async move { - let pb = SolveProgressBar::new( - global_multi_progress().add(ProgressBar::hidden()), - platform, - group_name.clone(), - ); - pb.start(); - - // Convert the dependencies into match specs - let match_specs = dependencies - .iter_specs() - .map(|(name, constraint)| { - MatchSpec::from_nameless(constraint.clone(), Some(name.clone())) - }) - .collect_vec(); - - // Extract the package names from the dependencies - let package_names = dependencies.names().cloned().collect_vec(); - - // Extract the repo data records needed to solve the environment. 
- pb.set_message("loading repodata"); - let available_packages = load_sparse_repo_data_async( - package_names.clone(), - sparse_repo_data, - channels, - platform, - ) - .await?; - - // Solve conda packages - pb.set_message("resolving conda"); - let mut records = lock_file::resolve_conda_dependencies( - match_specs, - virtual_packages, - existing_repodata_records, - available_packages, - )?; - - // Add purl's for the conda packages that are also available as pypi packages if we need them. - if has_pypi_dependencies { - lock_file::pypi::amend_pypi_purls(&mut records).await?; - } - - // Turn the records into a map by name - let records_by_name = records - .into_iter() - .map(|record| (record.package_record.name.clone(), record)) - .collect(); - - // Finish the progress bar - pb.finish(); - - Ok(TaskResult::CondaGroupSolved( - group_name, - platform, - records_by_name, - )) - }) - .await - .unwrap_or_else(|e| match e.try_into_panic() { - Ok(panic) => std::panic::resume_unwind(panic), - Err(_err) => Err(miette::miette!("the operation was cancelled")), - }) -} - -/// Distill the repodata that is applicable for the given `environment` from the repodata of an entire solve group. -async fn spawn_extract_conda_environment_task( - environment: Environment<'_>, - platform: Platform, - solve_group_records: impl Future>, -) -> miette::Result { - let group = GroupedEnvironment::from(environment.clone()); - - // Await the records from the group - let group_records = solve_group_records.await; - - // If the group is just the environment on its own we can immediately return the records. - if matches!(group, GroupedEnvironment::Environment(_)) { - return Ok(TaskResult::CondaSolved( - environment.name().clone(), - platform, - group_records - .iter() - .map(|(_, record)| record) - .cloned() - .collect(), - )); - } - - let virtual_package_names = group - .virtual_packages(platform) - .into_iter() - .map(|vp| vp.name) - .collect::>(); - - let environment_dependencies = environment.dependencies(None, Some(platform)); - - let environment_records = extract_referenced_conda_records( - &group_records, - &virtual_package_names, - &environment_dependencies, - ); - - Ok(TaskResult::CondaSolved( - environment.name().clone(), - platform, - environment_records, - )) -} - -/// A task that solves the pypi dependencies for a given environment. -async fn spawn_solve_pypi_task( - environment: Environment<'_>, - platform: Platform, - repodata_records: impl Future>>, - prefix: impl Future, - sdist_resolution: SDistResolution, -) -> miette::Result { - // Get the Pypi dependencies for this environment - let dependencies = environment.pypi_dependencies(Some(platform)); - if dependencies.is_empty() { - return Ok(TaskResult::PypiSolved( - environment.name().clone(), - platform, - Vec::new(), - )); - } - - // Get the system requirements for this environment - let system_requirements = environment.system_requirements(); - - // Get the package database - let package_db = environment.project().pypi_package_db()?; - - // Wait until the conda records and prefix are available. 
- let (repodata_records, (prefix, python_status)) = tokio::join!(repodata_records, prefix); - - let environment_name = environment.name().clone(); - let pypi_packages = tokio::spawn(async move { - let pb = SolveProgressBar::new( - global_multi_progress().add(ProgressBar::hidden()), - platform, - GroupedEnvironmentName::Environment(environment_name), - ); - pb.start(); - - let result = resolve_pypi( - package_db, - dependencies, - system_requirements, - &repodata_records, - &[], - platform, - &pb.pb, - python_status - .location() - .map(|path| prefix.root().join(path)) - .as_deref(), - sdist_resolution, - ) - .await; - - pb.finish(); - - result - }) - .await - .unwrap_or_else(|e| match e.try_into_panic() { - Ok(panic) => std::panic::resume_unwind(panic), - Err(_err) => Err(miette::miette!("the operation was cancelled")), - })?; - - Ok(TaskResult::PypiSolved( - environment.name().clone(), - platform, - pypi_packages, - )) -} - -/// Updates the prefix for the given environment. -/// -/// This function will wait until the conda records for the prefix are available. -async fn spawn_create_prefix_task( - environment: Environment<'_>, - package_cache: Arc, - conda_records: impl Future>>, -) -> miette::Result { - let environment_name = environment.name().clone(); - let prefix = Prefix::new(environment.dir()); - let client = environment.project().authenticated_client().clone(); - - // Spawn a task to determine the currently installed packages. - let installed_packages_future = tokio::spawn({ - let prefix = prefix.clone(); - async move { prefix.find_installed_packages(None).await } - }) - .unwrap_or_else(|e| match e.try_into_panic() { - Ok(panic) => std::panic::resume_unwind(panic), - Err(_err) => Err(miette::miette!("the operation was cancelled")), - }); - - // Wait until the conda records are available and until the installed packages for this prefix - // are available. - let (conda_records, installed_packages) = - tokio::try_join!(conda_records.map(Ok), installed_packages_future)?; - - // Spawn a background task to update the prefix - let python_status = tokio::spawn({ - let prefix = prefix.clone(); - let environment_name = environment_name.clone(); - async move { - update_prefix_conda( - &environment_name, - &prefix, - package_cache, - client, - installed_packages, - &conda_records, - Platform::current(), - ) - .await - } - }) - .await - .unwrap_or_else(|e| match e.try_into_panic() { - Ok(panic) => std::panic::resume_unwind(panic), - Err(_err) => Err(miette::miette!("the operation was cancelled")), - })?; - - Ok(TaskResult::CondaPrefixUpdated( - environment_name, - prefix, - Box::new(python_status), - )) -} - -/// Load the repodata records for the specified platform and package names in the background. This -/// is a CPU and IO intensive task so we run it in a blocking task to not block the main task. 
-pub async fn load_sparse_repo_data_async( - package_names: Vec, - sparse_repo_data: Arc>, - channels: Vec, - platform: Platform, -) -> miette::Result>> { - tokio::task::spawn_blocking(move || { - let sparse = channels - .into_iter() - .cartesian_product(vec![platform, Platform::NoArch]) - .filter_map(|target| sparse_repo_data.get(&target)); - - // Load only records we need for this platform - SparseRepoData::load_records_recursive(sparse, package_names, None).into_diagnostic() - }) - .await - .map_err(|e| match e.try_into_panic() { - Ok(panic) => std::panic::resume_unwind(panic), - Err(_err) => miette::miette!("the operation was cancelled"), - }) - .map_or_else(Err, identity) - .with_context(|| { - format!( - "failed to load repodata records for platform '{}'", - platform.as_str() - ) - }) -} - -/// A helper struct that manages a progress-bar for solving an environment. -#[derive(Clone)] -pub(crate) struct SolveProgressBar { - pb: ProgressBar, - platform: Platform, - environment_name: GroupedEnvironmentName, -} - -impl SolveProgressBar { - pub fn new( - pb: ProgressBar, - platform: Platform, - environment_name: GroupedEnvironmentName, - ) -> Self { - pb.set_style( - indicatif::ProgressStyle::with_template(&format!( - " ({:>12}) {:<9} ..", - environment_name.fancy_display(), - platform.to_string(), - )) - .unwrap(), - ); - pb.enable_steady_tick(Duration::from_millis(100)); - Self { - pb, - platform, - environment_name, - } - } - - pub fn start(&self) { - self.pb.reset_elapsed(); - self.pb.set_style( - indicatif::ProgressStyle::with_template(&format!( - " {{spinner:.dim}} {:>12}: {:<9} [{{elapsed_precise}}] {{msg:.dim}}", - self.environment_name.fancy_display(), - self.platform.to_string(), - )) - .unwrap(), - ); - } - - pub fn set_message(&self, msg: impl Into>) { - self.pb.set_message(msg); - } - - pub fn finish(&self) { - self.pb.set_style( - indicatif::ProgressStyle::with_template(&format!( - " {} ({:>12}) {:<9} [{{elapsed_precise}}]", - console::style(console::Emoji("✔", "↳")).green(), - self.environment_name.fancy_display(), - self.platform.to_string(), - )) - .unwrap(), - ); - self.pb.finish_and_clear(); - } -} - -/// Either a solve group or an individual environment without a solve group. 
-#[derive(Debug, Hash, Eq, PartialEq, Clone)] -pub enum GroupedEnvironment<'p> { - Group(SolveGroup<'p>), - Environment(Environment<'p>), -} - -#[derive(Clone)] -pub enum GroupedEnvironmentName { - Group(String), - Environment(EnvironmentName), -} - -impl GroupedEnvironmentName { - pub fn fancy_display(&self) -> console::StyledObject<&str> { - match self { - GroupedEnvironmentName::Group(name) => console::style(name.as_str()).magenta(), - GroupedEnvironmentName::Environment(name) => name.fancy_display(), - } - } -} -impl<'p> From> for GroupedEnvironment<'p> { - fn from(source: SolveGroup<'p>) -> Self { - GroupedEnvironment::Group(source) - } -} - -impl<'p> From> for GroupedEnvironment<'p> { - fn from(source: Environment<'p>) -> Self { - source.solve_group().map_or_else( - || GroupedEnvironment::Environment(source), - GroupedEnvironment::Group, - ) - } -} - -impl<'p> GroupedEnvironment<'p> { - pub fn name(&self) -> GroupedEnvironmentName { - match self { - GroupedEnvironment::Group(group) => { - GroupedEnvironmentName::Group(group.name().to_string()) - } - GroupedEnvironment::Environment(env) => { - GroupedEnvironmentName::Environment(env.name().clone()) - } - } - } - - pub fn environments(&self) -> impl Iterator> + '_ { - match self { - GroupedEnvironment::Group(group) => itertools::Either::Left(group.environments()), - GroupedEnvironment::Environment(env) => { - itertools::Either::Right(vec![env.clone()].into_iter()) - } - } - } - - pub fn dependencies(&self, kind: Option, platform: Option) -> Dependencies { - match self { - GroupedEnvironment::Group(group) => group.dependencies(kind, platform), - GroupedEnvironment::Environment(env) => env.dependencies(kind, platform), - } - } - - pub fn system_requirements(&self) -> SystemRequirements { - match self { - GroupedEnvironment::Group(group) => group.system_requirements(), - GroupedEnvironment::Environment(env) => env.system_requirements(), - } - } - - pub fn virtual_packages(&self, platform: Platform) -> Vec { - get_minimal_virtual_packages(platform, &self.system_requirements()) - .into_iter() - .map(GenericVirtualPackage::from) - .collect() - } - - pub fn channels(&self) -> IndexSet<&'p Channel> { - match self { - GroupedEnvironment::Group(group) => group.channels(), - GroupedEnvironment::Environment(env) => env.channels(), - } - } - - pub fn has_pypi_dependencies(&self) -> bool { - match self { - GroupedEnvironment::Group(group) => group.has_pypi_dependencies(), - GroupedEnvironment::Environment(env) => env.has_pypi_dependencies(), - } - } -} - -/// Given a list of dependencies, and list of conda repodata records by package name recursively extract all the -/// repodata records that are needed to satisfy the requirements. -/// -/// This function only looks at the names of the packages and does not actually match the requirements of the -/// dependencies. This function assumes that the repodata records from `records` form a consistent environment. If -/// this turns out not to be the case this function might panic. 
-fn extract_referenced_conda_records(
-    records: &LockedCondaPackagesByName,
-    virtual_packages: &HashSet<PackageName>,
-    dependencies: &Dependencies,
-) -> LockedCondaPackages {
-    let mut queue = dependencies
-        .iter_specs()
-        .map(|(name, _)| name.clone())
-        .collect_vec();
-    let mut queued_names = queue.iter().cloned().collect::<HashSet<_>>();
-    let mut result = Vec::new();
-    while let Some(package) = queue.pop() {
-        // Find the record in the superset of records
-        let found_package = if virtual_packages.contains(&package) {
-            continue;
-        } else if let Some(record) = records.get(&package) {
-            record
-        } else {
-            unreachable!("missing package '{}' from superset", package.as_source());
-        };
-
-        // Find all the dependencies of the package and add them to the queue
-        for dependency in found_package.package_record.depends.iter() {
-            let dependency_name = PackageName::new_unchecked(
-                dependency.split_once(' ').unwrap_or((&dependency, "")).0,
-            );
-            if queued_names.insert(dependency_name.clone()) {
-                queue.push(dependency_name);
-            }
-        }
-
-        result.push(found_package.clone());
-    }
-
-    result
-}
+pub type PerEnvironment<'p, T> = HashMap<Environment<'p>, T>;
+pub type PerGroup<'p, T> = HashMap<GroupedEnvironment<'p>, T>;
+pub type PerEnvironmentAndPlatform<'p, T> = PerEnvironment<'p, HashMap<Platform, T>>;
+pub type PerGroupAndPlatform<'p, T> = PerGroup<'p, HashMap<Platform, T>>;
diff --git a/src/install.rs b/src/install.rs
index 716c82677..5cd611663 100644
--- a/src/install.rs
+++ b/src/install.rs
@@ -142,9 +142,8 @@ pub async fn execute_transaction(
         .await;
 
     // Post-process the environment installation to unclobber all files deterministically
-    let new_prefix_records = PrefixRecord::collect_from_prefix(&target_prefix).into_diagnostic()?;
     install_driver
-        .post_process(&new_prefix_records, &target_prefix)
+        .post_process(transaction, &target_prefix)
        .into_diagnostic()?;
 
     // Clear progress bars
diff --git a/src/lib.rs b/src/lib.rs
index 90f8b429a..87292025e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -21,8 +21,8 @@ mod pypi_marker_env;
 mod pypi_tags;
 
 pub use activation::get_activation_env;
-pub use environment::UpdateLockFileOptions;
 pub use lock_file::load_lock_file;
+pub use lock_file::UpdateLockFileOptions;
 pub use project::{
     manifest::{EnvironmentName, FeatureName},
     DependencyType, Project, SpecType,
diff --git a/src/lock_file/mod.rs b/src/lock_file/mod.rs
index 6faf0ef9a..9ff1fae09 100644
--- a/src/lock_file/mod.rs
+++ b/src/lock_file/mod.rs
@@ -1,30 +1,37 @@
 #![deny(dead_code)]
 
+mod outdated;
 mod package_identifier;
 pub(crate) mod pypi;
 mod pypi_name_mapping;
+mod records_by_name;
+mod resolve;
 mod satisfiability;
+mod update;
 
 use crate::Project;
-use indexmap::IndexMap;
-use indicatif::ProgressBar;
 use miette::IntoDiagnostic;
-use rattler_conda_types::{GenericVirtualPackage, MatchSpec, Platform, RepoDataRecord};
-use rattler_lock::{LockFile, PackageHashes, PypiPackageData, PypiPackageEnvironmentData};
-use rattler_solve::{resolvo, SolverImpl};
-use rip::{index::PackageDb, resolve::solve_options::SDistResolution};
-use std::{path::Path, sync::Arc};
+use rattler_conda_types::RepoDataRecord;
+use rattler_lock::{LockFile, PypiPackageData, PypiPackageEnvironmentData};
 
-use crate::project::manifest::{PyPiRequirement, SystemRequirements};
+pub use outdated::OutdatedEnvironments;
+pub use package_identifier::PypiPackageIdentifier;
+pub use records_by_name::{PypiRecordsByName, RepoDataRecordsByName};
+pub use resolve::{resolve_conda, resolve_pypi};
 pub use satisfiability::{
     verify_environment_satisfiability, verify_platform_satisfiability, PlatformUnsat,
 };
+pub use update::{LockFileDerivedData, UpdateLockFileOptions};
 
 /// A list of conda packages that are locked for a specific platform.
 pub type LockedCondaPackages = Vec<RepoDataRecord>;
 
 /// A list of Pypi packages that are locked for a specific platform.
-pub type LockedPypiPackages = Vec<(PypiPackageData, PypiPackageEnvironmentData)>;
+pub type LockedPypiPackages = Vec<PypiRecord>;
+
+/// A single Pypi record that contains both the package data and the environment data. In Pixi we
+/// basically always need both.
+pub type PypiRecord = (PypiPackageData, PypiPackageEnvironmentData);
 
 /// Loads the lockfile for the specified project or returns a dummy one if none could be found.
 pub async fn load_lock_file(project: &Project) -> miette::Result<LockFile> {
@@ -38,91 +45,3 @@ pub async fn load_lock_file(project: &Project) -> miette::Result<LockFile> {
         Ok(LockFile::default())
     }
 }
-
-#[allow(clippy::too_many_arguments)]
-pub async fn resolve_pypi(
-    package_db: Arc<PackageDb>,
-    dependencies: IndexMap<rip::types::PackageName, Vec<PyPiRequirement>>,
-    system_requirements: SystemRequirements,
-    locked_conda_records: &[RepoDataRecord],
-    _locked_pypi_records: &[(PypiPackageData, PypiPackageEnvironmentData)],
-    platform: Platform,
-    pb: &ProgressBar,
-    python_location: Option<&Path>,
-    sdist_resolution: SDistResolution,
-) -> miette::Result<LockedPypiPackages> {
-    // Solve python packages
-    pb.set_message("resolving pypi dependencies");
-    let python_artifacts = pypi::resolve_dependencies(
-        package_db.clone(),
-        dependencies,
-        system_requirements,
-        platform,
-        locked_conda_records,
-        python_location,
-        sdist_resolution,
-    )
-    .await?;
-
-    // Clear message
-    pb.set_message("");
-
-    // Add pip packages
-    let mut locked_packages = LockedPypiPackages::with_capacity(python_artifacts.len());
-    for python_artifact in python_artifacts {
-        let (artifact, metadata) = package_db
-            // No need for a WheelBuilder here since any builds should have been done during the
-            // [`python::resolve_dependencies`] call.
-            .get_metadata(&python_artifact.artifacts, None)
-            .await
-            .expect("failed to get metadata for a package for which we have already fetched metadata during solving.")
-            .expect("no metadata for a package for which we have already fetched metadata during solving.");
-
-        let pkg_data = PypiPackageData {
-            name: python_artifact.name.to_string(),
-            version: python_artifact.version,
-            requires_dist: metadata.requires_dist,
-            requires_python: metadata.requires_python,
-            url: artifact.url.clone(),
-            hash: artifact
-                .hashes
-                .as_ref()
-                .and_then(|hash| PackageHashes::from_hashes(None, hash.sha256)),
-        };
-
-        let pkg_env = PypiPackageEnvironmentData {
-            extras: python_artifact
-                .extras
-                .into_iter()
-                .map(|e| e.as_str().to_string())
-                .collect(),
-        };
-
-        locked_packages.push((pkg_data, pkg_env));
-    }
-
-    Ok(locked_packages)
-}
-
-/// Solves the conda package environment for the given input. This function is async because it
-/// spawns a background task for the solver. Since solving is a CPU intensive task we do not want to
-/// block the main task.
-pub fn resolve_conda_dependencies(
-    specs: Vec<MatchSpec>,
-    virtual_packages: Vec<GenericVirtualPackage>,
-    locked_packages: Vec<RepoDataRecord>,
-    available_packages: Vec<Vec<RepoDataRecord>>,
-) -> miette::Result<LockedCondaPackages> {
-    // Construct a solver task that we can start solving.
-    let task = rattler_solve::SolverTask {
-        specs,
-        available_packages: &available_packages,
-        locked_packages,
-        pinned_packages: vec![],
-        virtual_packages,
-        timeout: None,
-    };
-
-    // Solve the task
-    resolvo::Solver.solve(task).into_diagnostic()
-}
diff --git a/src/lock_file/outdated.rs b/src/lock_file/outdated.rs
new file mode 100644
index 000000000..12220d37f
--- /dev/null
+++ b/src/lock_file/outdated.rs
@@ -0,0 +1,311 @@
+use super::{verify_environment_satisfiability, verify_platform_satisfiability, PlatformUnsat};
+use crate::{consts, project::Environment, project::SolveGroup, Project};
+use itertools::Itertools;
+use rattler_conda_types::Platform;
+use rattler_lock::{LockFile, Package};
+use std::collections::{HashMap, HashSet};
+
+/// A struct that contains information about specific outdated environments.
+///
+/// Use [`OutdatedEnvironments::from_project_and_lock_file`] to create an instance of this
+/// struct by examining the project and lock-file and finding any mismatches.
+pub struct OutdatedEnvironments<'p> {
+    /// The conda environments that are considered out of date with the lock-file.
+    pub conda: HashMap<Environment<'p>, HashSet<Platform>>,
+
+    /// The pypi environments that are considered out of date with the lock-file.
+    pub pypi: HashMap<Environment<'p>, HashSet<Platform>>,
+}
+
+impl<'p> OutdatedEnvironments<'p> {
+    /// Constructs a new instance of this struct by examining the project and lock-file and finding
+    /// any mismatches.
+    pub fn from_project_and_lock_file(project: &'p Project, lock_file: &LockFile) -> Self {
+        let mut outdated_conda: HashMap<_, HashSet<_>> = HashMap::new();
+        let mut outdated_pypi: HashMap<_, HashSet<_>> = HashMap::new();
+
+        // Find all targets that are not satisfied by the lock-file
+        find_unsatisfiable_targets(project, lock_file, &mut outdated_conda, &mut outdated_pypi);
+
+        // Extend the outdated targets to include the solve groups
+        let (mut conda_solve_groups_out_of_date, mut pypi_solve_groups_out_of_date) =
+            map_outdated_targets_to_solve_groups(&outdated_conda, &outdated_pypi);
+
+        // Find all the solve groups that have inconsistent dependencies between environments.
+        find_inconsistent_solve_groups(
+            project,
+            lock_file,
+            &outdated_conda,
+            &mut conda_solve_groups_out_of_date,
+            &mut pypi_solve_groups_out_of_date,
+        );
+
+        // Mark the rest of the environments out of date for all solve groups
+        for (solve_group, platforms) in conda_solve_groups_out_of_date {
+            for env in solve_group.environments() {
+                outdated_conda
+                    .entry(env.clone())
+                    .or_default()
+                    .extend(platforms.iter().copied());
+            }
+        }
+
+        for (solve_group, platforms) in pypi_solve_groups_out_of_date {
+            for env in solve_group.environments() {
+                outdated_pypi
+                    .entry(env.clone())
+                    .or_default()
+                    .extend(platforms.iter().copied());
+            }
+        }
+
+        // For all targets where conda is out of date, the pypi packages are also out of date.
+        for (environment, platforms) in outdated_conda.iter() {
+            outdated_pypi
+                .entry(environment.clone())
+                .or_default()
+                .extend(platforms.iter().copied());
+        }
+
+        Self {
+            conda: outdated_conda,
+            pypi: outdated_pypi,
+        }
+    }
+
+    /// Returns true if the lock-file is up-to-date with the project (e.g. there are no
+    /// outdated targets).
+    pub fn is_empty(&self) -> bool {
+        self.conda.is_empty() && self.pypi.is_empty()
+    }
+}
+
+/// Find all targets (combination of environment and platform) whose requirements in the `project`
+/// are not satisfied by the `lock_file`.
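+///
+/// A minimal usage sketch (hypothetical, for illustration only; both maps are assumed to
+/// start out empty and are filled with every target that needs a re-solve):
+///
+/// ```ignore
+/// let mut outdated_conda = HashMap::new();
+/// let mut outdated_pypi = HashMap::new();
+/// find_unsatisfiable_targets(&project, &lock_file, &mut outdated_conda, &mut outdated_pypi);
+/// // Every (environment, platform) pair in `outdated_conda` requires a fresh conda solve.
+/// ```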
+fn find_unsatisfiable_targets<'p>(
+    project: &'p Project,
+    lock_file: &LockFile,
+    outdated_conda: &mut HashMap<Environment<'p>, HashSet<Platform>>,
+    outdated_pypi: &mut HashMap<Environment<'p>, HashSet<Platform>>,
+) {
+    for environment in project.environments() {
+        let platforms = environment.platforms();
+
+        // Get the locked environment from the lock-file
+        let Some(locked_environment) = lock_file.environment(environment.name().as_str()) else {
+            tracing::info!(
+                "environment '{0}' is out of date because it does not exist in the lock-file.",
+                environment.name().fancy_display()
+            );
+
+            outdated_conda
+                .entry(environment.clone())
+                .or_default()
+                .extend(platforms);
+
+            continue;
+        };
+
+        // The locked environment exists, but does it match our project environment?
+        if let Err(unsat) = verify_environment_satisfiability(&environment, &locked_environment) {
+            tracing::info!(
+                "environment '{0}' is out of date because {unsat}",
+                environment.name().fancy_display()
+            );
+
+            outdated_conda
+                .entry(environment.clone())
+                .or_default()
+                .extend(platforms);
+
+            continue;
+        }
+
+        // Verify each individual platform
+        for platform in platforms {
+            match verify_platform_satisfiability(&environment, &locked_environment, platform) {
+                Ok(_) => {}
+                Err(unsat @ PlatformUnsat::UnsatisfiableRequirement(_, _)) => {
+                    tracing::info!(
+                        "the pypi dependencies of environment '{0}' for platform {platform} are out of date because {unsat}",
+                        environment.name().fancy_display()
+                    );
+
+                    outdated_pypi
+                        .entry(environment.clone())
+                        .or_default()
+                        .insert(platform);
+                }
+                Err(unsat) => {
+                    tracing::info!(
+                        "the dependencies of environment '{0}' for platform {platform} are out of date because {unsat}",
+                        environment.name().fancy_display()
+                    );
+
+                    outdated_conda
+                        .entry(environment.clone())
+                        .or_default()
+                        .insert(platform);
+                }
+            }
+        }
+    }
+}
+
+/// Given a mapping of outdated targets, construct a new mapping of all the groups that are out of
+/// date.
+///
+/// If one of the environments in a solve-group is no longer satisfied by the lock-file, all the
+/// environments in the same solve-group have to be recomputed.
+fn map_outdated_targets_to_solve_groups<'p>(
+    outdated_conda: &HashMap<Environment<'p>, HashSet<Platform>>,
+    outdated_pypi: &HashMap<Environment<'p>, HashSet<Platform>>,
+) -> (
+    HashMap<SolveGroup<'p>, HashSet<Platform>>,
+    HashMap<SolveGroup<'p>, HashSet<Platform>>,
+) {
+    let mut conda_solve_groups_out_of_date = HashMap::new();
+    let mut pypi_solve_groups_out_of_date = HashMap::new();
+
+    // For each environment that is out of date, add it to the solve group.
+    for (environment, platforms) in outdated_conda.iter() {
+        let Some(solve_group) = environment.solve_group() else {
+            continue;
+        };
+        conda_solve_groups_out_of_date
+            .entry(solve_group)
+            .or_insert_with(HashSet::new)
+            .extend(platforms.iter().copied());
+    }
+
+    // For each environment that is out of date, add it to the solve group.
+    for (environment, platforms) in outdated_pypi.iter() {
+        let Some(solve_group) = environment.solve_group() else {
+            continue;
+        };
+        pypi_solve_groups_out_of_date
+            .entry(solve_group)
+            .or_insert_with(HashSet::new)
+            .extend(platforms.iter().copied());
+    }
+
+    (
+        conda_solve_groups_out_of_date,
+        pypi_solve_groups_out_of_date,
+    )
+}
+
+/// Given a `project` and `lock_file`, finds all the solve-groups that have inconsistent
+/// dependencies between environments.
+///
+/// All environments in a solve-group must share the same dependencies. This function iterates over
+/// solve-groups and checks if the dependencies of all its environments are the same. For each
+/// package name, only one candidate is allowed.
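+///
+/// An illustrative sketch of the consistency rule (hypothetical environments and URLs):
+///
+/// ```ignore
+/// // solve-group `default`, platform `linux-64`:
+/// //   env `py39`  locks numpy @ https://example.com/numpy-1.26.3-py39.conda
+/// //   env `py310` locks numpy @ https://example.com/numpy-1.26.2-py310.conda
+/// // The locked URLs differ for the same package name, so the conda records of the
+/// // whole solve-group are marked out of date for `linux-64`.
+/// ```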
+fn find_inconsistent_solve_groups<'p>(
+    project: &'p Project,
+    lock_file: &LockFile,
+    outdated_conda: &HashMap<Environment<'p>, HashSet<Platform>>,
+    conda_solve_groups_out_of_date: &mut HashMap<SolveGroup<'p>, HashSet<Platform>>,
+    pypi_solve_groups_out_of_date: &mut HashMap<SolveGroup<'p>, HashSet<Platform>>,
+) {
+    let solve_groups = project.solve_groups();
+    let solve_groups_and_platforms = solve_groups.iter().flat_map(|solve_group| {
+        solve_group
+            .environments()
+            .flat_map(|env| env.platforms())
+            .unique()
+            .map(move |platform| (solve_group, platform))
+    });
+
+    for (solve_group, platform) in solve_groups_and_platforms {
+        // Keep track of whether any of the package types are out of date
+        let mut conda_package_mismatch = false;
+        let mut pypi_package_mismatch = false;
+
+        // Keep track of the packages by name to check for mismatches between environments.
+        let mut conda_packages_by_name = HashMap::new();
+        let mut pypi_packages_by_name = HashMap::new();
+
+        // Iterate over all environments to compare the packages.
+        for env in solve_group.environments() {
+            if outdated_conda
+                .get(&env)
+                .and_then(|p| p.get(&platform))
+                .is_some()
+            {
+                // If the environment is already out-of-date there is no need to check it,
+                // because the solve-group is already out-of-date.
+                break;
+            }
+
+            let Some(locked_env) = lock_file.environment(env.name().as_str()) else {
+                // If the environment is missing, we already marked it as out of date.
+                continue;
+            };
+
+            for package in locked_env.packages(platform).into_iter().flatten() {
+                match package {
+                    Package::Conda(pkg) => {
+                        match conda_packages_by_name.get(&pkg.package_record().name) {
+                            None => {
+                                conda_packages_by_name
+                                    .insert(pkg.package_record().name.clone(), pkg.url().clone());
+                            }
+                            Some(url) if pkg.url() != url => {
+                                conda_package_mismatch = true;
+                            }
+                            _ => {}
+                        }
+                    }
+                    Package::Pypi(pkg) => {
+                        match pypi_packages_by_name.get(&pkg.data().package.name) {
+                            None => {
+                                pypi_packages_by_name
+                                    .insert(pkg.data().package.name.clone(), pkg.url().clone());
+                            }
+                            Some(url) if pkg.url() != url => {
+                                pypi_package_mismatch = true;
+                            }
+                            _ => {}
+                        }
+                    }
+                }
+
+                // If there is a conda package mismatch there is also a pypi mismatch and we
+                // can break early.
+                if conda_package_mismatch {
+                    pypi_package_mismatch = true;
+                    break;
+                }
+            }
+
+            // If there is a conda package mismatch there is also a pypi mismatch and we can
+            // break early.
+            if conda_package_mismatch {
+                pypi_package_mismatch = true;
+                break;
+            }
+        }
+
+        // If there is a mismatch there is a mismatch for the entire group
+        if conda_package_mismatch {
+            tracing::info!("the locked conda packages in solve group {} are not consistent for all environments for platform {}",
+                consts::SOLVE_GROUP_STYLE.apply_to(solve_group.name()),
+                consts::PLATFORM_STYLE.apply_to(platform));
+            conda_solve_groups_out_of_date
+                .entry(solve_group.clone())
+                .or_default()
+                .insert(platform);
+        }
+
+        if pypi_package_mismatch && !conda_package_mismatch {
+            tracing::info!("the locked pypi packages in solve group {} are not consistent for all environments for platform {}",
+                consts::SOLVE_GROUP_STYLE.apply_to(solve_group.name()),
+                consts::PLATFORM_STYLE.apply_to(platform));
+            pypi_solve_groups_out_of_date
+                .entry(solve_group.clone())
+                .or_default()
+                .insert(platform);
+        }
+    }
+}
diff --git a/src/lock_file/records_by_name.rs b/src/lock_file/records_by_name.rs
new file mode 100644
index 000000000..30194068e
--- /dev/null
+++ b/src/lock_file/records_by_name.rs
@@ -0,0 +1,199 @@
+use crate::lock_file::{PypiPackageIdentifier, PypiRecord};
+use rattler_conda_types::{PackageName, RepoDataRecord};
+use std::borrow::Borrow;
+use std::collections::hash_map::Entry;
+use std::collections::{HashMap, HashSet};
+use std::hash::Hash;
+use std::str::FromStr;
+
+/// A struct that holds both a `Vec` of `RepoDataRecord` and a mapping from name to index.
+#[derive(Clone, Debug, Default)]
+pub struct RepoDataRecordsByName {
+    pub records: Vec<RepoDataRecord>,
+    by_name: HashMap<PackageName, usize>,
+}
+
+impl From<Vec<RepoDataRecord>> for RepoDataRecordsByName {
+    fn from(records: Vec<RepoDataRecord>) -> Self {
+        let by_name = records
+            .iter()
+            .enumerate()
+            .map(|(idx, record)| (record.package_record.name.clone(), idx))
+            .collect();
+        Self { records, by_name }
+    }
+}
+
+impl RepoDataRecordsByName {
+    /// Returns the record with the given name or `None` if no such record exists.
+    pub fn by_name<Q: ?Sized>(&self, key: &Q) -> Option<&RepoDataRecord>
+    where
+        PackageName: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.by_name.get(key).map(|idx| &self.records[*idx])
+    }
+
+    /// Converts this instance into the internally stored records.
+    pub fn into_inner(self) -> Vec<RepoDataRecord> {
+        self.records
+    }
+
+    /// Constructs a new instance from an iterator of repodata records. The records are
+    /// deduplicated where the record with the highest version wins.
+    pub fn from_iter<I: IntoIterator<Item = RepoDataRecord>>(iter: I) -> Self {
+        let iter = iter.into_iter();
+        let min_size = iter.size_hint().0;
+        let mut by_name = HashMap::with_capacity(min_size);
+        let mut records = Vec::with_capacity(min_size);
+        for record in iter {
+            match by_name.entry(record.package_record.name.clone()) {
+                Entry::Vacant(entry) => {
+                    let idx = records.len();
+                    records.push(record);
+                    entry.insert(idx);
+                }
+                Entry::Occupied(entry) => {
+                    // Use the entry with the highest version or otherwise the first we encounter.
+                    let idx = *entry.get();
+                    if records[idx].package_record.version < record.package_record.version {
+                        records[idx] = record;
+                    }
+                }
+            }
+        }
+
+        Self { records, by_name }
+    }
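Aside: `from_iter` deduplicates by name with a "highest version wins" rule. A self-contained sketch of that rule, with `(name, version)` tuples standing in for `RepoDataRecord`:

```rust
use std::collections::{hash_map::Entry, HashMap};

/// Standalone sketch of the deduplication used by
/// `RepoDataRecordsByName::from_iter`.
fn dedup_by_name(records: Vec<(String, u32)>) -> Vec<(String, u32)> {
    let mut by_name: HashMap<String, usize> = HashMap::new();
    let mut out: Vec<(String, u32)> = Vec::new();
    for (name, version) in records {
        match by_name.entry(name.clone()) {
            Entry::Vacant(entry) => {
                entry.insert(out.len());
                out.push((name, version));
            }
            Entry::Occupied(entry) => {
                // Keep the record with the highest version, in place.
                let idx = *entry.get();
                if out[idx].1 < version {
                    out[idx] = (name, version);
                }
            }
        }
    }
    out
}

fn main() {
    let deduped = dedup_by_name(vec![
        ("python".to_string(), 311),
        ("python".to_string(), 312),
        ("numpy".to_string(), 126),
    ]);
    // The higher "python" entry replaced the earlier one at the same index.
    assert_eq!(
        deduped,
        vec![("python".to_string(), 312), ("numpy".to_string(), 126)]
    );
}
```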
+    /// Constructs a subset of the records in this set that only contain the packages with the
+    /// given names and recursively their dependencies.
+    pub fn subset(
+        &self,
+        package_names: impl IntoIterator<Item = PackageName>,
+        virtual_packages: &HashSet<PackageName>,
+    ) -> Self {
+        let mut queue = package_names.into_iter().collect::<Vec<_>>();
+        let mut queued_names = queue.iter().cloned().collect::<HashSet<_>>();
+        let mut records = Vec::new();
+        let mut by_name = HashMap::new();
+        while let Some(package) = queue.pop() {
+            // Find the record in the superset of records
+            let found_package = if virtual_packages.contains(&package) {
+                continue;
+            } else if let Some(record) = self.by_name(&package) {
+                record
+            } else {
+                continue;
+            };
+
+            // Find all the dependencies of the package and add them to the queue
+            for dependency in found_package.package_record.depends.iter() {
+                let dependency_name = PackageName::new_unchecked(
+                    dependency.split_once(' ').unwrap_or((dependency.as_str(), "")).0,
+                );
+                if queued_names.insert(dependency_name.clone()) {
+                    queue.push(dependency_name);
+                }
+            }
+
+            let idx = records.len();
+            by_name.insert(package, idx);
+            records.push(found_package.clone());
+        }
+
+        Self { records, by_name }
+    }
+}
+
+#[derive(Clone, Debug, Default)]
+pub struct PypiRecordsByName {
+    pub records: Vec<PypiRecord>,
+    by_name: HashMap<rip::types::PackageName, usize>,
+}
+
+impl PypiRecordsByName {
+    /// Returns the record with the given name or `None` if no such record exists.
+    pub fn by_name<Q: ?Sized>(&self, key: &Q) -> Option<&PypiRecord>
+    where
+        rip::types::PackageName: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.by_name.get(key).map(|idx| &self.records[*idx])
+    }
+
+    /// Converts this instance into the internally stored records.
+    pub fn into_inner(self) -> Vec<PypiRecord> {
+        self.records
+    }
+
+    /// Constructs a new instance from an iterator of pypi records. The records are
+    /// deduplicated where the record with the highest version wins.
+    pub fn from_iter<I: IntoIterator<Item = PypiRecord>>(iter: I) -> Self {
+        let iter = iter.into_iter();
+        let min_size = iter.size_hint().0;
+        let mut by_name = HashMap::with_capacity(min_size);
+        let mut records = Vec::with_capacity(min_size);
+        for record in iter {
+            let Ok(package_name) = rip::types::PackageName::from_str(&record.0.name) else {
+                continue;
+            };
+            match by_name.entry(package_name) {
+                Entry::Vacant(entry) => {
+                    let idx = records.len();
+                    records.push(record);
+                    entry.insert(idx);
+                }
+                Entry::Occupied(entry) => {
+                    // Use the entry with the highest version or otherwise the first we encounter.
+                    let idx = *entry.get();
+                    if records[idx].0.version < record.0.version {
+                        records[idx] = record;
+                    }
+                }
+            }
+        }
+
+        Self { records, by_name }
+    }
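Aside: both `by_name` accessors use the standard `Borrow` pattern so a map keyed by an owned name type can be queried with a borrowed key without allocating. A self-contained sketch of the same shape:

```rust
use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::Hash;

/// Sketch of the `Borrow`-based lookup used by the `by_name` accessors:
/// the index is keyed by an owned `String`, but callers can pass `&str`.
struct ByName<V> {
    index: HashMap<String, usize>,
    values: Vec<V>,
}

impl<V> ByName<V> {
    fn get<Q: ?Sized>(&self, key: &Q) -> Option<&V>
    where
        String: Borrow<Q>,
        Q: Hash + Eq,
    {
        self.index.get(key).map(|idx| &self.values[*idx])
    }
}

fn main() {
    let by_name = ByName {
        index: HashMap::from([("numpy".to_string(), 0)]),
        values: vec!["1.26.4"],
    };
    // No `String` allocation is needed for the lookup.
    assert_eq!(by_name.get("numpy"), Some(&"1.26.4"));
}
```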
+    /// Constructs a subset of the records in this set that only contain the packages with the
+    /// given names and recursively their dependencies.
+    pub fn subset(
+        &self,
+        package_names: impl IntoIterator<Item = rip::types::PackageName>,
+        conda_package_identifiers: &HashMap<rip::types::PackageName, PypiPackageIdentifier>,
+    ) -> Self {
+        let mut queue = package_names.into_iter().collect::<Vec<_>>();
+        let mut queued_names = queue.iter().cloned().collect::<HashSet<_>>();
+        let mut records = Vec::new();
+        let mut by_name = HashMap::new();
+        while let Some(package) = queue.pop() {
+            // Find the record in the superset of records
+            let found_package = if conda_package_identifiers.contains_key(&package) {
+                continue;
+            } else if let Some(record) = self.by_name(&package) {
+                record
+            } else {
+                continue;
+            };
+
+            // Find all the dependencies of the package and add them to the queue
+            for dependency in found_package.0.requires_dist.iter() {
+                let Ok(dependency_name) = rip::types::PackageName::from_str(&dependency.name)
+                else {
+                    continue;
+                };
+                if queued_names.insert(dependency_name.clone()) {
+                    queue.push(dependency_name);
+                }
+            }
+
+            let idx = records.len();
+            by_name.insert(package, idx);
+            records.push(found_package.clone());
+        }
+
+        Self { records, by_name }
+    }
+}
diff --git a/src/lock_file/resolve.rs b/src/lock_file/resolve.rs
new file mode 100644
index 000000000..98cb86fa3
--- /dev/null
+++ b/src/lock_file/resolve.rs
@@ -0,0 +1,113 @@
+//! This module contains code to resolve python packages from PyPI or Conda packages.
+//!
+//! See [`resolve_pypi`] and [`resolve_conda`] for more information.
+
+use crate::{
+    lock_file::{pypi, LockedCondaPackages, LockedPypiPackages, PypiRecord},
+    project::manifest::{PyPiRequirement, SystemRequirements},
+};
+use indexmap::IndexMap;
+use indicatif::ProgressBar;
+use miette::IntoDiagnostic;
+use rattler_conda_types::{GenericVirtualPackage, MatchSpec, Platform, RepoDataRecord};
+use rattler_lock::{PackageHashes, PypiPackageData, PypiPackageEnvironmentData};
+use rattler_solve::{resolvo, SolverImpl};
+use rip::{index::PackageDb, resolve::solve_options::SDistResolution};
+use std::{path::Path, sync::Arc};
+
+/// This function takes as input a set of dependencies and system requirements and returns a set of
+/// locked packages.
+#[allow(clippy::too_many_arguments)]
+pub async fn resolve_pypi(
+    package_db: Arc<PackageDb>,
+    dependencies: IndexMap<rip::types::PackageName, Vec<PyPiRequirement>>,
+    system_requirements: SystemRequirements,
+    locked_conda_records: &[RepoDataRecord],
+    _locked_pypi_records: &[PypiRecord],
+    platform: Platform,
+    pb: &ProgressBar,
+    python_location: Option<&Path>,
+    sdist_resolution: SDistResolution,
+) -> miette::Result<LockedPypiPackages> {
+    // Solve python packages
+    pb.set_message("resolving pypi dependencies");
+    let python_artifacts = pypi::resolve_dependencies(
+        package_db.clone(),
+        dependencies,
+        system_requirements,
+        platform,
+        locked_conda_records,
+        python_location,
+        sdist_resolution,
+    )
+    .await?;
+
+    // Clear message
+    pb.set_message("");
+
+    // Add pip packages
+    let mut locked_packages = LockedPypiPackages::with_capacity(python_artifacts.len());
+    for python_artifact in python_artifacts {
+        let (artifact, metadata) = package_db
+            // No need for a WheelBuilder here since any builds should have been done during the
+            // [`python::resolve_dependencies`] call.
+            .get_metadata(&python_artifact.artifacts, None)
+            .await
+            .expect("failed to get metadata for a package for which we have already fetched metadata during solving.")
+            .expect("no metadata for a package for which we have already fetched metadata during solving.");
+
+        let pkg_data = PypiPackageData {
+            name: python_artifact.name.to_string(),
+            version: python_artifact.version,
+            requires_dist: metadata.requires_dist,
+            requires_python: metadata.requires_python,
+            url: artifact.url.clone(),
+            hash: artifact
+                .hashes
+                .as_ref()
+                .and_then(|hash| PackageHashes::from_hashes(None, hash.sha256)),
+        };
+
+        let pkg_env = PypiPackageEnvironmentData {
+            extras: python_artifact
+                .extras
+                .into_iter()
+                .map(|e| e.as_str().to_string())
+                .collect(),
+        };
+
+        locked_packages.push((pkg_data, pkg_env));
+    }
+
+    Ok(locked_packages)
+}
+
+/// Solves the conda package environment for the given input. This function is async because it
+/// spawns a background task for the solver. Since solving is a CPU intensive task we do not want to
+/// block the main task.
+pub async fn resolve_conda(
+    specs: Vec<MatchSpec>,
+    virtual_packages: Vec<GenericVirtualPackage>,
+    locked_packages: Vec<RepoDataRecord>,
+    available_packages: Vec<Vec<RepoDataRecord>>,
+) -> miette::Result<LockedCondaPackages> {
+    tokio::task::spawn_blocking(move || {
+        // Construct a solver task that we can start solving.
+        let task = rattler_solve::SolverTask {
+            specs,
+            available_packages: &available_packages,
+            locked_packages,
+            pinned_packages: vec![],
+            virtual_packages,
+            timeout: None,
+        };
+
+        // Solve the task
+        resolvo::Solver.solve(task).into_diagnostic()
+    })
+    .await
+    .unwrap_or_else(|e| match e.try_into_panic() {
+        Ok(e) => std::panic::resume_unwind(e),
+        Err(_err) => Err(miette::miette!("cancelled")),
+    })
+}
diff --git a/src/lock_file/update.rs b/src/lock_file/update.rs
new file mode 100644
index 000000000..3c599aa2b
--- /dev/null
+++ b/src/lock_file/update.rs
@@ -0,0 +1,1456 @@
+use crate::{
+    config, consts, environment,
+    environment::{
+        LockFileUsage, PerEnvironmentAndPlatform, PerGroup, PerGroupAndPlatform, PythonStatus,
+    },
+    load_lock_file, lock_file,
+    lock_file::{
+        update, OutdatedEnvironments, PypiPackageIdentifier, PypiRecordsByName,
+        RepoDataRecordsByName,
+    },
+    prefix::Prefix,
+    progress::global_multi_progress,
+    project::{Environment, GroupedEnvironment, GroupedEnvironmentName},
+    repodata::fetch_sparse_repodata_targets,
+    utils::BarrierCell,
+    EnvironmentName, Project,
+};
+use futures::{future::Either, stream::FuturesUnordered, FutureExt, StreamExt, TryFutureExt};
+use indexmap::{IndexMap, IndexSet};
+use indicatif::ProgressBar;
+use itertools::Itertools;
+use miette::{IntoDiagnostic, WrapErr};
+use rattler::package_cache::PackageCache;
+use rattler_conda_types::{Channel, MatchSpec, PackageName, Platform, RepoDataRecord};
+use rattler_lock::{LockFile, PypiPackageData, PypiPackageEnvironmentData};
+use rattler_repodata_gateway::sparse::SparseRepoData;
+use rip::resolve::solve_options::SDistResolution;
+use std::{
+    borrow::Cow,
+    collections::{HashMap, HashSet},
+    convert::identity,
+    future::{ready, Future},
+    sync::Arc,
+    time::{Duration, Instant},
+};
+use tokio::sync::Semaphore;
+use tracing::Instrument;
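Aside: `resolve_conda` above ends with a `JoinError` idiom that recurs throughout this patch — resume a panic from the spawned task on the current thread, and turn a cancellation into a regular error. A self-contained sketch (the error type is simplified to `String`):

```rust
use tokio::task::JoinError;

/// Sketch of the `JoinError` handling used throughout this patch.
async fn run_in_task() -> Result<u32, String> {
    tokio::task::spawn_blocking(|| Ok(21 * 2))
        .await
        .unwrap_or_else(|e: JoinError| match e.try_into_panic() {
            // The task panicked: re-raise the panic on this thread.
            Ok(panic) => std::panic::resume_unwind(panic),
            // The task was cancelled: surface it as a regular error.
            Err(_err) => Err("the operation was cancelled".to_string()),
        })
}

#[tokio::main]
async fn main() {
    assert_eq!(run_in_task().await, Ok(42));
}
```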
+impl Project {
+    /// Ensures that the lock-file is up-to-date with the project information.
+    ///
+    /// Returns the lock-file and any potential derived data that was computed as part of this
+    /// operation.
+    pub async fn up_to_date_lock_file(
+        &self,
+        options: UpdateLockFileOptions,
+    ) -> miette::Result<LockFileDerivedData<'_>> {
+        update::ensure_up_to_date_lock_file(self, options).await
+    }
+}
+
+/// Options to pass to [`Project::up_to_date_lock_file`].
+#[derive(Default)]
+pub struct UpdateLockFileOptions {
+    /// Defines what to do if the lock-file is out of date
+    pub lock_file_usage: LockFileUsage,
+
+    /// Don't install anything to disk.
+    pub no_install: bool,
+
+    /// Existing repodata that can be used to avoid downloading it again.
+    pub existing_repo_data: IndexMap<(Channel, Platform), SparseRepoData>,
+
+    /// The maximum number of concurrent solves that are allowed to run. If this value is `None`
+    /// a heuristic is used based on the number of cores available from the system.
+    pub max_concurrent_solves: Option<usize>,
+}
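Aside: a hedged sketch of how a caller might drive this API. The wrapper function below is hypothetical; only the `Project`, `Environment`, `Prefix`, and `UpdateLockFileOptions` names come from the patch itself:

```rust
// Hypothetical call site; `ensure_environment` is not part of this patch.
async fn ensure_environment<'p>(
    project: &'p Project,
    environment: &Environment<'p>,
) -> miette::Result<Prefix> {
    // All fields of `UpdateLockFileOptions` have sensible defaults.
    let mut derived = project
        .up_to_date_lock_file(UpdateLockFileOptions::default())
        .await?;

    // Prefix creation is memoized inside `LockFileDerivedData`, so repeated
    // calls for the same environment are cheap.
    derived.prefix(environment).await
}
```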
+/// A struct that holds the lock-file and any potential derived data that was computed when calling
+/// `ensure_up_to_date_lock_file`.
+pub struct LockFileDerivedData<'p> {
+    /// The lock-file
+    pub lock_file: LockFile,
+
+    /// The package cache
+    pub package_cache: Arc<PackageCache>,
+
+    /// Repodata that was fetched
+    pub repo_data: IndexMap<(Channel, Platform), SparseRepoData>,
+
+    /// A list of prefixes that are up-to-date with the latest conda packages.
+    pub updated_conda_prefixes: HashMap<Environment<'p>, (Prefix, PythonStatus)>,
+
+    /// A list of prefixes that have been updated while resolving all dependencies.
+    pub updated_pypi_prefixes: HashMap<Environment<'p>, Prefix>,
+}
+
+impl<'p> LockFileDerivedData<'p> {
+    /// Returns the up-to-date prefix for the given environment.
+    pub async fn prefix(&mut self, environment: &Environment<'p>) -> miette::Result<Prefix> {
+        if let Some(prefix) = self.updated_pypi_prefixes.get(environment) {
+            return Ok(prefix.clone());
+        }
+
+        // Get the prefix with the conda packages installed.
+        let platform = Platform::current();
+        let package_db = environment.project().pypi_package_db()?;
+        let (prefix, python_status) = self.conda_prefix(environment).await?;
+        let repodata_records = self
+            .repodata_records(environment, platform)
+            .unwrap_or_default();
+        let pypi_records = self.pypi_records(environment, platform).unwrap_or_default();
+
+        // Update the prefix with Pypi records
+        environment::update_prefix_pypi(
+            environment.name(),
+            &prefix,
+            platform,
+            package_db,
+            &repodata_records,
+            &pypi_records,
+            &python_status,
+            &environment.system_requirements(),
+            SDistResolution::default(),
+        )
+        .await?;
+
+        // Store that we updated the environment, so we won't have to do it again.
+        self.updated_pypi_prefixes
+            .insert(environment.clone(), prefix.clone());
+
+        Ok(prefix)
+    }
+
+    fn pypi_records(
+        &self,
+        environment: &Environment<'p>,
+        platform: Platform,
+    ) -> Option<Vec<PypiRecord>> {
+        let locked_env = self
+            .lock_file
+            .environment(environment.name().as_str())
+            .expect("the lock-file should be up-to-date so it should also include the environment");
+        locked_env.pypi_packages_for_platform(platform)
+    }
+
+    fn repodata_records(
+        &self,
+        environment: &Environment<'p>,
+        platform: Platform,
+    ) -> Option<Vec<RepoDataRecord>> {
+        let locked_env = self
+            .lock_file
+            .environment(environment.name().as_str())
+            .expect("the lock-file should be up-to-date so it should also include the environment");
+        locked_env
+            .conda_repodata_records_for_platform(platform)
+            .expect("since the lock-file is up to date we should be able to extract the repodata records from it")
+    }
+
+    async fn conda_prefix(
+        &mut self,
+        environment: &Environment<'p>,
+    ) -> miette::Result<(Prefix, PythonStatus)> {
+        // If we previously updated this environment, early out.
+        if let Some((prefix, python_status)) = self.updated_conda_prefixes.get(environment) {
+            return Ok((prefix.clone(), python_status.clone()));
+        }
+
+        let prefix = Prefix::new(environment.dir());
+        let platform = Platform::current();
+
+        // Determine the currently installed packages.
+        let installed_packages = prefix
+            .find_installed_packages(None)
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to determine the currently installed packages for '{}'",
+                    environment.name(),
+                )
+            })?;
+
+        // Get the locked environment from the lock-file.
+        let records = self
+            .repodata_records(environment, platform)
+            .unwrap_or_default();
+
+        // Update the prefix with conda packages.
+        let python_status = environment::update_prefix_conda(
+            GroupedEnvironmentName::Environment(environment.name().clone()),
+            &prefix,
+            self.package_cache.clone(),
+            environment.project().authenticated_client().clone(),
+            installed_packages,
+            &records,
+            platform,
+        )
+        .await?;
+
+        // Store that we updated the environment, so we won't have to do it again.
+        self.updated_conda_prefixes
+            .insert(environment.clone(), (prefix.clone(), python_status.clone()));
+
+        Ok((prefix, python_status))
+    }
+}
+
+#[derive(Default)]
+struct UpdateContext<'p> {
+    /// Repodata that is available to the solve tasks.
+    repo_data: Arc<IndexMap<(Channel, Platform), SparseRepoData>>,
+
+    /// Repodata records from the lock-file. This contains the records that actually exist in the
+    /// lock-file. If the lock-file is missing or partially missing then the data also won't exist
+    /// in this field.
+    locked_repodata_records: PerEnvironmentAndPlatform<'p, Arc<RepoDataRecordsByName>>,
+
+    /// Repodata records from the lock-file grouped by solve-group.
+    locked_grouped_repodata_records: PerGroupAndPlatform<'p, Arc<RepoDataRecordsByName>>,
+
+    /// Pypi records from the lock-file. This contains the records that actually exist in the
+    /// lock-file. If the lock-file is missing or partially missing then the data also won't exist
+    /// in this field.
+    locked_pypi_records: PerEnvironmentAndPlatform<'p, Arc<PypiRecordsByName>>,
+
+    /// Keeps track of all pending conda targets that are being solved. The mapping contains a
+    /// [`BarrierCell`] that will eventually contain the solved records computed by another task.
+    /// This allows tasks to wait for the records to be solved before proceeding.
+    solved_repodata_records:
+        PerEnvironmentAndPlatform<'p, Arc<BarrierCell<Arc<RepoDataRecordsByName>>>>,
+    /// Keeps track of all pending grouped conda targets that are being solved.
+    grouped_solved_repodata_records:
+        PerGroupAndPlatform<'p, Arc<BarrierCell<Arc<RepoDataRecordsByName>>>>,
+
+    /// Keeps track of all pending prefix updates. This only tracks the conda updates to a prefix,
+    /// not whether the pypi packages have also been updated.
+    instantiated_conda_prefixes: PerGroup<'p, Arc<BarrierCell<(Prefix, PythonStatus)>>>,
+
+    /// Keeps track of all pending pypi targets that are being solved. The mapping contains a
+    /// [`BarrierCell`] that will eventually contain the solved records computed by another task.
+    /// This allows tasks to wait for the records to be solved before proceeding.
+    solved_pypi_records: PerEnvironmentAndPlatform<'p, Arc<BarrierCell<Arc<PypiRecordsByName>>>>,
+
+    /// Keeps track of all pending grouped pypi targets that are being solved.
+    grouped_solved_pypi_records: PerGroupAndPlatform<'p, Arc<BarrierCell<Arc<PypiRecordsByName>>>>,
+}
+
+impl<'p> UpdateContext<'p> {
+    /// Returns a future that will resolve to the solved repodata records for the given environment
+    /// or `None` if the records do not exist and are also not in the process of being updated.
+    pub fn get_latest_repodata_records(
+        &self,
+        environment: &Environment<'p>,
+        platform: Platform,
+    ) -> Option<impl Future<Output = Arc<RepoDataRecordsByName>>> {
+        self.solved_repodata_records
+            .get(environment)
+            .and_then(|records| records.get(&platform))
+            .map(|records| {
+                let records = records.clone();
+                Either::Left(async move { records.wait().await.clone() })
+            })
+            .or_else(|| {
+                self.locked_repodata_records
+                    .get(environment)
+                    .and_then(|records| records.get(&platform))
+                    .cloned()
+                    .map(ready)
+                    .map(Either::Right)
+            })
+    }
+
+    /// Returns a future that will resolve to the solved repodata records for the given environment
+    /// group or `None` if the records do not exist and are also not in the process of being
+    /// updated.
+    pub fn get_latest_group_repodata_records(
+        &self,
+        group: &GroupedEnvironment<'p>,
+        platform: Platform,
+    ) -> Option<impl Future<Output = Arc<RepoDataRecordsByName>>> {
+        // Check if there is a pending operation for this group and platform
+        if let Some(pending_records) = self
+            .grouped_solved_repodata_records
+            .get(group)
+            .and_then(|records| records.get(&platform))
+            .cloned()
+        {
+            return Some((async move { pending_records.wait().await.clone() }).left_future());
+        }
+
+        // Otherwise read the records directly from the lock-file.
+        let locked_records = self
+            .locked_grouped_repodata_records
+            .get(group)
+            .and_then(|records| records.get(&platform))?
+            .clone();
+
+        Some(ready(locked_records).right_future())
+    }
+
+    /// Takes the latest repodata records for the given environment and platform. Returns `None` if
+    /// neither the records exist nor are in the process of being updated.
+    ///
+    /// This function panics if the repodata records are still pending.
+    pub fn take_latest_repodata_records(
+        &mut self,
+        environment: &Environment<'p>,
+        platform: Platform,
+    ) -> Option<RepoDataRecordsByName> {
+        self.solved_repodata_records
+            .get_mut(environment)
+            .and_then(|records| records.remove(&platform))
+            .map(|cell| {
+                Arc::into_inner(cell)
+                    .expect("records must not be shared")
+                    .into_inner()
+                    .expect("records must be available")
+            })
+            .or_else(|| {
+                self.locked_repodata_records
+                    .get_mut(environment)
+                    .and_then(|records| records.remove(&platform))
+            })
+            .map(|records| Arc::try_unwrap(records).unwrap_or_else(|arc| (*arc).clone()))
+    }
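Aside: the `get_latest_*` accessors above return `futures::future::Either` so the caller sees a single concrete future type whether the records are still being solved (wait on the cell) or already locked (immediately ready). A self-contained sketch of that shape:

```rust
use futures::{
    executor::block_on,
    future::{ready, Either},
};
use std::future::Future;

/// Sketch of the Either-based accessor pattern: one future type that is
/// either "wait for a pending computation" or "already available".
fn get_latest(pending: Option<u32>, locked: Option<u32>) -> Option<impl Future<Output = u32>> {
    if let Some(value) = pending {
        // Stand-in for `async move { cell.wait().await.clone() }`.
        return Some(Either::Left(async move { value }));
    }
    // Fall back to the records already present in the lock-file.
    locked.map(|value| Either::Right(ready(value)))
}

fn main() {
    let fut = get_latest(None, Some(7)).expect("a value should be available");
    assert_eq!(block_on(fut), 7);
}
```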
+    /// Takes the latest pypi records for the given environment and platform. Returns `None` if
+    /// neither the records exist nor are in the process of being updated.
+    ///
+    /// This function panics if the pypi records are still pending.
+    pub fn take_latest_pypi_records(
+        &mut self,
+        environment: &Environment<'p>,
+        platform: Platform,
+    ) -> Option<PypiRecordsByName> {
+        self.solved_pypi_records
+            .get_mut(environment)
+            .and_then(|records| records.remove(&platform))
+            .map(|cell| {
+                Arc::into_inner(cell)
+                    .expect("records must not be shared")
+                    .into_inner()
+                    .expect("records must be available")
+            })
+            .or_else(|| {
+                self.locked_pypi_records
+                    .get_mut(environment)
+                    .and_then(|records| records.remove(&platform))
+            })
+            .map(|records| Arc::try_unwrap(records).unwrap_or_else(|arc| (*arc).clone()))
+    }
+
+    /// Get a list of conda prefixes that have been updated.
+    pub fn take_instantiated_conda_prefixes(
+        &mut self,
+    ) -> HashMap<Environment<'p>, (Prefix, PythonStatus)> {
+        self.instantiated_conda_prefixes
+            .drain()
+            .filter_map(|(env, cell)| match env {
+                GroupedEnvironment::Environment(env) => {
+                    let prefix = Arc::into_inner(cell)
+                        .expect("prefixes must not be shared")
+                        .into_inner()
+                        .expect("prefix must be available");
+                    Some((env, prefix))
+                }
+                _ => None,
+            })
+            .collect()
+    }
+
+    /// Returns a future that will resolve to the conda prefix for the given group or `None` if no
+    /// task was spawned to instantiate the prefix.
+    pub fn get_conda_prefix(
+        &self,
+        environment: &GroupedEnvironment<'p>,
+    ) -> Option<impl Future<Output = (Prefix, PythonStatus)>> {
+        let cell = self.instantiated_conda_prefixes.get(environment)?.clone();
+        Some(async move { cell.wait().await.clone() })
+    }
+}
+
+/// Returns the default number of concurrent solves.
+fn default_max_concurrent_solves() -> usize {
+    let available_parallelism = std::thread::available_parallelism().map_or(1, |n| n.get());
+    (available_parallelism.saturating_sub(2)).min(4).max(1)
+}
+
+/// Ensures that the lock-file is up-to-date with the project.
+///
+/// This function will return a [`LockFileDerivedData`] struct that contains the lock-file and any
+/// potential derived data that was computed as part of this function. The derived data might be
+/// usable by other functions to avoid recomputing the same data.
+///
+/// This function starts by checking if the lock-file is up-to-date. If it is not up-to-date it will
+/// construct a task graph of all the work that needs to be done to update the lock-file. The tasks
+/// are awaited in a specific order to make sure that we can start instantiating prefixes as soon as
+/// possible.
+pub async fn ensure_up_to_date_lock_file(
+    project: &Project,
+    options: UpdateLockFileOptions,
+) -> miette::Result<LockFileDerivedData<'_>> {
+    let lock_file = load_lock_file(project).await?;
+    let current_platform = Platform::current();
+    let package_cache = Arc::new(PackageCache::new(config::get_cache_dir()?.join("pkgs")));
+    let max_concurrent_solves = options
+        .max_concurrent_solves
+        .unwrap_or_else(default_max_concurrent_solves);
+    let solve_semaphore = Arc::new(Semaphore::new(max_concurrent_solves));
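Aside: the concurrency heuristic leaves two cores free for other work and clamps the result to the 1..=4 range. A quick check of its boundary behavior (same expression, with the core count injected so it is testable):

```rust
/// Mirror of `default_max_concurrent_solves` with the core count injected.
fn max_concurrent_solves(available_parallelism: usize) -> usize {
    (available_parallelism.saturating_sub(2)).min(4).max(1)
}

fn main() {
    assert_eq!(max_concurrent_solves(1), 1); // never fewer than one solve
    assert_eq!(max_concurrent_solves(4), 2); // leave two cores for other work
    assert_eq!(max_concurrent_solves(16), 4); // capped at four concurrent solves
}
```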
+
+    // Should we check the lock-file in the first place?
+    if !options.lock_file_usage.should_check_if_out_of_date() {
+        tracing::info!("skipping check if lock-file is up-to-date");
+
+        return Ok(LockFileDerivedData {
+            lock_file,
+            package_cache,
+            repo_data: options.existing_repo_data,
+            updated_conda_prefixes: Default::default(),
+            updated_pypi_prefixes: Default::default(),
+        });
+    }
+
+    // Check which environments are out of date.
+    let outdated = OutdatedEnvironments::from_project_and_lock_file(project, &lock_file);
+    if outdated.is_empty() {
+        tracing::info!("the lock-file is up-to-date");
+
+        // If no environment is outdated we can return early.
+        return Ok(LockFileDerivedData {
+            lock_file,
+            package_cache,
+            repo_data: options.existing_repo_data,
+            updated_conda_prefixes: Default::default(),
+            updated_pypi_prefixes: Default::default(),
+        });
+    }
+
+    // If the lock-file is out of date, but we're not allowed to update it, we should exit.
+    if !options.lock_file_usage.allows_lock_file_updates() {
+        miette::bail!("lock-file not up-to-date with the project");
+    }
+
+    // Determine the repodata that we're going to need to solve the environments. For all outdated
+    // conda targets we take the union of all the channels that are used by the environment.
+    //
+    // The NoArch platform is always added regardless of whether it is explicitly used by the
+    // environment.
+    let mut fetch_targets = IndexSet::new();
+    for (environment, platforms) in outdated.conda.iter() {
+        for channel in environment.channels() {
+            for platform in platforms {
+                fetch_targets.insert((channel.clone(), *platform));
+            }
+            fetch_targets.insert((channel.clone(), Platform::NoArch));
+        }
+    }
+
+    // Fetch all the repodata that we need to solve the environments.
+    let mut repo_data = fetch_sparse_repodata_targets(
+        fetch_targets
+            .into_iter()
+            .filter(|target| !options.existing_repo_data.contains_key(target)),
+        project.authenticated_client(),
+    )
+    .await?;
+
+    // Add repo data that was already fetched
+    repo_data.extend(options.existing_repo_data);
+
+    // Extract the current conda records from the lock-file
+    // TODO: Should we parallelize this? Measure please.
+    let locked_repodata_records = project
+        .environments()
+        .into_iter()
+        .flat_map(|env| {
+            lock_file
+                .environment(env.name().as_str())
+                .into_iter()
+                .map(move |locked_env| {
+                    locked_env.conda_repodata_records().map(|records| {
+                        (
+                            env.clone(),
+                            records
+                                .into_iter()
+                                .map(|(platform, records)| {
+                                    (
+                                        platform,
+                                        Arc::new(RepoDataRecordsByName::from_iter(records)),
+                                    )
+                                })
+                                .collect(),
+                        )
+                    })
+                })
+        })
+        .collect::<Result<HashMap<_, HashMap<_, _>>, _>>()
+        .into_diagnostic()?;
+
+    let locked_pypi_records = project
+        .environments()
+        .into_iter()
+        .flat_map(|env| {
+            lock_file
+                .environment(env.name().as_str())
+                .into_iter()
+                .map(move |locked_env| {
+                    (
+                        env.clone(),
+                        locked_env
+                            .pypi_packages()
+                            .into_iter()
+                            .map(|(platform, records)| {
+                                (platform, Arc::new(PypiRecordsByName::from_iter(records)))
+                            })
+                            .collect(),
+                    )
+                })
+        })
+        .collect::<HashMap<_, HashMap<_, _>>>();
+
+    // Create a collection of all the [`GroupedEnvironments`] involved in the solve.
+    let all_grouped_environments = project
+        .environments()
+        .into_iter()
+        .map(GroupedEnvironment::from)
+        .unique()
+        .collect_vec();
+
+    // For every grouped environment extract the data from the lock-file. If multiple environments
+    // in a single solve-group have different versions for a single package name then the record
+    // with the highest version is used. This logic is implemented in
+    // `RepoDataRecordsByName::from_iter`. This can happen if previously two environments did not
+    // share the same solve-group.
+    let locked_grouped_repodata_records = all_grouped_environments
+        .iter()
+        .filter_map(|group| {
+            let records = match group {
+                GroupedEnvironment::Environment(env) => locked_repodata_records.get(env)?.clone(),
+                GroupedEnvironment::Group(group) => {
+                    let mut by_platform = HashMap::new();
+                    for env in group.environments() {
+                        let Some(records) = locked_repodata_records.get(&env) else {
+                            continue;
+                        };
+
+                        for (platform, records) in records.iter() {
+                            by_platform
+                                .entry(*platform)
+                                .or_insert_with(Vec::new)
+                                .extend(records.records.iter().cloned());
+                        }
+                    }
+
+                    by_platform
+                        .into_iter()
+                        .map(|(platform, records)| {
+                            (
+                                platform,
+                                Arc::new(RepoDataRecordsByName::from_iter(records)),
+                            )
+                        })
+                        .collect()
+                }
+            };
+            Some((group.clone(), records))
+        })
+        .collect();
+
+    let mut context = UpdateContext {
+        repo_data: Arc::new(repo_data),
+
+        locked_repodata_records,
+        locked_grouped_repodata_records,
+        locked_pypi_records,
+
+        solved_repodata_records: HashMap::new(),
+        instantiated_conda_prefixes: HashMap::new(),
+        solved_pypi_records: HashMap::new(),
+        grouped_solved_repodata_records: HashMap::new(),
+        grouped_solved_pypi_records: HashMap::new(),
+    };
+
+    // This will keep track of all outstanding tasks that we need to wait for. All tasks are added
+    // to this list after they are spawned. This function blocks until all pending tasks have either
+    // completed or errored.
+    let mut pending_futures = FuturesUnordered::new();
+
+    // Spawn tasks for all the conda targets that are out of date.
+    for (environment, platforms) in outdated.conda {
+        // Turn the platforms into an IndexSet, so we have a little control over the order in which
+        // we solve the platforms. We want to solve the current platform first, so we can start
+        // instantiating prefixes if we have to.
+        let mut ordered_platforms = platforms.into_iter().collect::<IndexSet<_>>();
+        if let Some(current_platform_index) = ordered_platforms.get_index_of(&current_platform) {
+            ordered_platforms.move_index(current_platform_index, 0);
+        }
+
+        // Determine the source of the solve information
+        let source = GroupedEnvironment::from(environment.clone());
+
+        for platform in ordered_platforms {
+            // Is there an existing pending task to solve the group?
+            let group_solve_records = if let Some(cell) = context
+                .grouped_solved_repodata_records
+                .get(&source)
+                .and_then(|platforms| platforms.get(&platform))
+            {
+                // Yes, we can reuse the existing cell.
+                cell.clone()
+            } else {
+                // No, we need to spawn a task to update for the entire solve group.
+                let locked_group_records = context
+                    .locked_grouped_repodata_records
+                    .get(&source)
+                    .and_then(|records| records.get(&platform))
+                    .cloned()
+                    .unwrap_or_default();
+
+                // Spawn a task to solve the group.
+                let group_solve_task = spawn_solve_conda_environment_task(
+                    source.clone(),
+                    locked_group_records,
+                    context.repo_data.clone(),
+                    platform,
+                    solve_semaphore.clone(),
+                )
+                .boxed_local();
+
+                // Store the task so we can poll it later.
+                pending_futures.push(group_solve_task);
+
+                // Create an entry that can be used by other tasks to wait for the result.
+                let cell = Arc::new(BarrierCell::new());
+                let previous_cell = context
+                    .grouped_solved_repodata_records
+                    .entry(source.clone())
+                    .or_default()
+                    .insert(platform, cell.clone());
+                assert!(
+                    previous_cell.is_none(),
+                    "a cell has already been added to update conda records"
+                );
+
+                cell
+            };
+
+            // Spawn a task to extract the records from the group solve task.
+            let records_future =
+                spawn_extract_conda_environment_task(environment.clone(), platform, async move {
+                    group_solve_records.wait().await.clone()
+                })
+                .boxed_local();
+
+            pending_futures.push(records_future);
+            let previous_cell = context
+                .solved_repodata_records
+                .entry(environment.clone())
+                .or_default()
+                .insert(platform, Arc::default());
+            assert!(
+                previous_cell.is_none(),
+                "a cell has already been added to update conda records"
+            );
+        }
+    }
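Aside: the backbone of this function is the pattern just shown — push locally-boxed futures into one `FuturesUnordered` and drain them on the main task, which is why the futures need not be `'static`. A self-contained miniature of that driving loop:

```rust
use futures::{executor::block_on, stream::FuturesUnordered, FutureExt, StreamExt};

/// Message type mirroring `TaskResult`: every pushed future resolves to one.
enum Msg {
    Solved(&'static str, u32),
}

fn main() {
    block_on(async {
        // The futures may borrow local data, so they are boxed *locally* and
        // polled from this single task instead of being `tokio::spawn`ed.
        let mut pending = FuturesUnordered::new();
        for (name, value) in [("default", 1), ("lint", 2)] {
            pending.push(async move { Msg::Solved(name, value * 10) }.boxed_local());
        }

        // Drain the set; completion order is not submission order.
        while let Some(Msg::Solved(name, value)) = pending.next().await {
            println!("solved '{name}' -> {value}");
        }
    });
}
```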
+
+    // Spawn tasks to instantiate prefixes that we need to be able to solve Pypi packages.
+    //
+    // Solving Pypi packages requires a python interpreter to be present in the prefix, therefore we
+    // first need to make sure we have conda packages available, then we can instantiate the
+    // prefix with at least the required conda packages (including a python interpreter) and then
+    // we can solve the Pypi packages using the installed interpreter.
+    //
+    // We only need to instantiate the prefix for the current platform.
+    for (environment, platforms) in outdated.pypi.iter() {
+        // Only instantiate a prefix if any of the platforms actually contain pypi dependencies. If
+        // there are no pypi-dependencies then solving is also not required and thus a prefix is
+        // also not required.
+        if !platforms
+            .iter()
+            .any(|p| !environment.pypi_dependencies(Some(*p)).is_empty())
+        {
+            continue;
+        }
+
+        // If we are not allowed to install, we can't instantiate a prefix.
+        if options.no_install {
+            miette::bail!("Cannot update pypi dependencies without first installing a conda prefix that includes python.");
+        }
+
+        // Check if the group is already being instantiated
+        let group = GroupedEnvironment::from(environment.clone());
+        if context.instantiated_conda_prefixes.contains_key(&group) {
+            continue;
+        }
+
+        // Construct a future that will resolve when we have the repodata available for the current
+        // platform for this group.
+        let records_future = context
+            .get_latest_group_repodata_records(&group, current_platform)
+            .expect("conda records should be available now or in the future");
+
+        // Spawn a task to instantiate the environment
+        let environment_name = environment.name().clone();
+        let pypi_env_task =
+            spawn_create_prefix_task(group.clone(), package_cache.clone(), records_future)
+                .map_err(move |e| {
+                    e.context(format!(
+                        "failed to instantiate a prefix for '{}'",
+                        environment_name
+                    ))
+                })
+                .boxed_local();
+
+        pending_futures.push(pypi_env_task);
+        let previous_cell = context
+            .instantiated_conda_prefixes
+            .insert(group, Arc::new(BarrierCell::new()));
+        assert!(
+            previous_cell.is_none(),
+            "cannot update the same group twice"
+        )
+    }
+
+    // Spawn tasks to update the pypi packages.
+    for (environment, platform) in outdated
+        .pypi
+        .into_iter()
+        .flat_map(|(env, platforms)| platforms.into_iter().map(move |p| (env.clone(), p)))
+    {
+        let dependencies = environment.pypi_dependencies(Some(platform));
+        if dependencies.is_empty() {
+            pending_futures.push(
+                ready(Ok(TaskResult::PypiSolved(
+                    environment.name().clone(),
+                    platform,
+                    Arc::default(),
+                )))
+                .boxed_local(),
+            );
+        } else {
+            let group = GroupedEnvironment::from(environment.clone());
+
+            // Solve all the pypi records in the solve group together.
+            let grouped_pypi_records = if let Some(cell) = context
+                .grouped_solved_pypi_records
+                .get(&group)
+                .and_then(|records| records.get(&platform))
+            {
+                // There is already a task to solve the pypi records for the group.
+                cell.clone()
+            } else {
+                // Construct a future that will resolve when we have the repodata available
+                let repodata_future = context
+                    .get_latest_group_repodata_records(&group, platform)
+                    .expect("conda records should be available now or in the future");
+
+                // Construct a future that will resolve when we have the conda prefix available
+                let prefix_future = context
+                    .get_conda_prefix(&group)
+                    .expect("prefix should be available now or in the future");
+
+                // Spawn a task to solve the pypi environment
+                let pypi_solve_future = spawn_solve_pypi_task(
+                    group.clone(),
+                    platform,
+                    repodata_future,
+                    prefix_future,
+                    SDistResolution::default(),
+                );
+
+                pending_futures.push(pypi_solve_future.boxed_local());
+
+                let cell = Arc::new(BarrierCell::new());
+                let previous_cell = context
+                    .grouped_solved_pypi_records
+                    .entry(group)
+                    .or_default()
+                    .insert(platform, cell.clone());
+                assert!(
+                    previous_cell.is_none(),
+                    "a cell has already been added to update pypi records"
+                );
+
+                cell
+            };
+
+            // Followed by spawning a task to extract exactly the pypi records that are needed for
+            // this environment.
+            let pypi_records_future = async move { grouped_pypi_records.wait().await.clone() };
+            let conda_records_future = context
+                .get_latest_repodata_records(&environment, platform)
+                .expect("must have conda records available");
+            let records_future = spawn_extract_pypi_environment_task(
+                environment.clone(),
+                platform,
+                conda_records_future,
+                pypi_records_future,
+            )
+            .boxed_local();
+            pending_futures.push(records_future);
+        }
+
+        let previous_cell = context
+            .solved_pypi_records
+            .entry(environment)
+            .or_default()
+            .insert(platform, Arc::default());
+        assert!(
+            previous_cell.is_none(),
+            "a cell has already been added to extract pypi records"
+        );
+    }
+
+    let top_level_progress =
+        global_multi_progress().add(ProgressBar::new(pending_futures.len() as u64));
+    top_level_progress.set_style(indicatif::ProgressStyle::default_bar()
+        .template("{spinner:.cyan} {prefix:20!} [{elapsed_precise}] [{bar:40!.bright.yellow/dim.white}] {pos:>4}/{len:4} {wide_msg:.dim}").unwrap()
+        .progress_chars("━━╾─"));
+    top_level_progress.enable_steady_tick(Duration::from_millis(50));
+    top_level_progress.set_prefix("updating lock-file");
+
+    // Iterate over all the futures we spawned and wait for them to complete.
+    //
+    // The spawned futures each result either in an error or in a `TaskResult`. The `TaskResult`
+    // contains the result of the task. The results are stored into [`BarrierCell`]s which allows
+    // other tasks to respond to the data becoming available.
+    //
+    // A loop on the main task is used versus individually spawning all tasks for two reasons:
+    //
+    // 1. This provides some control over when data is polled and broadcasted to other tasks. No
+    //    data is broadcasted until we start polling futures here. This reduces the risk of
+    //    race-conditions where data has already been broadcasted before a task subscribes to it.
+    // 2. The futures stored in `pending_futures` do not necessarily have to be `'static`. Which
+    //    makes them easier to work with.
+    while let Some(result) = pending_futures.next().await {
+        top_level_progress.inc(1);
+        match result? {
+            TaskResult::CondaGroupSolved(group_name, platform, records, duration) => {
+                let group = GroupedEnvironment::from_name(project, &group_name)
+                    .expect("group should exist");
+
+                context
+                    .grouped_solved_repodata_records
+                    .get_mut(&group)
+                    .expect("the entry for this environment should exist")
+                    .get_mut(&platform)
+                    .expect("the entry for this platform should exist")
+                    .set(Arc::new(records))
+                    .expect("records should not be solved twice");
+
+                match group_name {
+                    GroupedEnvironmentName::Group(_) => {
+                        tracing::info!(
+                            "resolved conda environment for solve group '{}' '{}' in {}",
+                            group_name.fancy_display(),
+                            consts::PLATFORM_STYLE.apply_to(platform),
+                            humantime::format_duration(duration)
+                        );
+                    }
+                    GroupedEnvironmentName::Environment(env_name) => {
+                        tracing::info!(
+                            "resolved conda environment for environment '{}' '{}' in {}",
+                            env_name.fancy_display(),
+                            consts::PLATFORM_STYLE.apply_to(platform),
+                            humantime::format_duration(duration)
+                        );
+                    }
+                }
+            }
+            TaskResult::CondaSolved(environment, platform, records) => {
+                let environment = project
+                    .environment(&environment)
+                    .expect("environment should exist");
+
+                context
+                    .solved_repodata_records
+                    .get_mut(&environment)
+                    .expect("the entry for this environment should exist")
+                    .get_mut(&platform)
+                    .expect("the entry for this platform should exist")
+                    .set(records)
+                    .expect("records should not be solved twice");
+
+                let group = GroupedEnvironment::from(environment.clone());
+                if matches!(group, GroupedEnvironment::Group(_)) {
+                    tracing::info!(
+                        "extracted conda packages for '{}' '{}' from the '{}' group",
+                        environment.name().fancy_display(),
+                        consts::PLATFORM_STYLE.apply_to(platform),
+                        group.name().fancy_display(),
+                    );
+                }
+            }
+            TaskResult::CondaPrefixUpdated(group_name, prefix, python_status, duration) => {
+                let group = GroupedEnvironment::from_name(project, &group_name)
+                    .expect("grouped environment should exist");
+
+                context
+                    .instantiated_conda_prefixes
+                    .get_mut(&group)
+                    .expect("the entry for this environment should exist")
+                    .set((prefix, *python_status))
+                    .expect("prefix should not be instantiated twice");
+
+                tracing::info!(
+                    "updated conda packages in the '{}' prefix in {}",
+                    group.name().fancy_display(),
+                    humantime::format_duration(duration)
+                );
+            }
+            TaskResult::PypiGroupSolved(group_name, platform, records, duration) => {
+                let group = GroupedEnvironment::from_name(project, &group_name)
+                    .expect("group should exist");
+
+                context
+                    .grouped_solved_pypi_records
+                    .get_mut(&group)
+                    .expect("the entry for this environment should exist")
+                    .get_mut(&platform)
+                    .expect("the entry for this platform should exist")
+                    .set(Arc::new(records))
+                    .expect("records should not be solved twice");
+
+                match group_name {
+                    GroupedEnvironmentName::Group(_) => {
+                        tracing::info!(
+                            "resolved pypi packages for solve group '{}' '{}' in {}",
+                            group_name.fancy_display(),
+                            consts::PLATFORM_STYLE.apply_to(platform),
+                            humantime::format_duration(duration),
+                        );
+                    }
+                    GroupedEnvironmentName::Environment(env_name) => {
+                        tracing::info!(
+                            "resolved pypi packages for environment '{}' '{}' in {}",
+                            env_name.fancy_display(),
+                            consts::PLATFORM_STYLE.apply_to(platform),
+                            humantime::format_duration(duration),
+                        );
+                    }
+                }
+            }
+            TaskResult::PypiSolved(environment, platform, records) => {
+                let environment = project
+                    .environment(&environment)
+                    .expect("environment should exist");
+
+                context
+                    .solved_pypi_records
+                    .get_mut(&environment)
+                    .expect("the entry for this environment should exist")
+                    .get_mut(&platform)
+ .expect("the entry for this platform should exist") + .set(records) + .expect("records should not be solved twice"); + + let group = GroupedEnvironment::from(environment.clone()); + if matches!(group, GroupedEnvironment::Group(_)) { + tracing::info!( + "extracted pypi packages for '{}' '{}' from the '{}' group", + environment.name().fancy_display(), + consts::PLATFORM_STYLE.apply_to(platform), + group.name().fancy_display(), + ); + } + } + } + } + + // Construct a new lock-file containing all the updated or old records. + let mut builder = LockFile::builder(); + + // Iterate over all environments and add their records to the lock-file. + for environment in project.environments() { + builder.set_channels( + environment.name().as_str(), + environment + .channels() + .into_iter() + .map(|channel| rattler_lock::Channel::from(channel.base_url().to_string())), + ); + + for platform in environment.platforms() { + if let Some(records) = context.take_latest_repodata_records(&environment, platform) { + for record in records.into_inner() { + builder.add_conda_package(environment.name().as_str(), platform, record.into()); + } + } + if let Some(records) = context.take_latest_pypi_records(&environment, platform) { + for (pkg_data, pkg_env_data) in records.into_inner() { + builder.add_pypi_package( + environment.name().as_str(), + platform, + pkg_data, + pkg_env_data, + ); + } + } + } + } + + // Store the lock file + let lock_file = builder.finish(); + lock_file + .to_path(&project.lock_file_path()) + .into_diagnostic() + .context("failed to write lock-file to disk")?; + + top_level_progress.finish_and_clear(); + + Ok(LockFileDerivedData { + lock_file, + package_cache, + updated_conda_prefixes: context.take_instantiated_conda_prefixes(), + updated_pypi_prefixes: HashMap::default(), + repo_data: Arc::into_inner(context.repo_data) + .expect("repo data should not be shared anymore"), + }) +} + +/// Represents data that is sent back from a task. This is used to communicate the result of a task +/// back to the main task which will forward the information to other tasks waiting for results. +enum TaskResult { + CondaGroupSolved( + GroupedEnvironmentName, + Platform, + RepoDataRecordsByName, + Duration, + ), + CondaSolved(EnvironmentName, Platform, Arc), + CondaPrefixUpdated(GroupedEnvironmentName, Prefix, Box, Duration), + PypiGroupSolved( + GroupedEnvironmentName, + Platform, + PypiRecordsByName, + Duration, + ), + PypiSolved(EnvironmentName, Platform, Arc), +} + +/// A task that solves the conda dependencies for a given environment. +async fn spawn_solve_conda_environment_task( + group: GroupedEnvironment<'_>, + existing_repodata_records: Arc, + sparse_repo_data: Arc>, + platform: Platform, + concurrency_semaphore: Arc, +) -> miette::Result { + // Get the dependencies for this platform + let dependencies = group.dependencies(None, Some(platform)); + + // Get the virtual packages for this platform + let virtual_packages = group.virtual_packages(platform); + + // Get the environment name + let group_name = group.name(); + + // The list of channels and platforms we need for this task + let channels = group.channels().into_iter().cloned().collect_vec(); + + // Capture local variables + let sparse_repo_data = sparse_repo_data.clone(); + + // Whether there are pypi dependencies, and we should fetch purls. 
+
+/// A task that solves the conda dependencies for a given environment.
+async fn spawn_solve_conda_environment_task(
+    group: GroupedEnvironment<'_>,
+    existing_repodata_records: Arc<RepoDataRecordsByName>,
+    sparse_repo_data: Arc<IndexMap<(Channel, Platform), SparseRepoData>>,
+    platform: Platform,
+    concurrency_semaphore: Arc<Semaphore>,
+) -> miette::Result<TaskResult> {
+    // Get the dependencies for this platform
+    let dependencies = group.dependencies(None, Some(platform));
+
+    // Get the virtual packages for this platform
+    let virtual_packages = group.virtual_packages(platform);
+
+    // Get the environment name
+    let group_name = group.name();
+
+    // The list of channels and platforms we need for this task
+    let channels = group.channels().into_iter().cloned().collect_vec();
+
+    // Capture local variables
+    let sparse_repo_data = sparse_repo_data.clone();
+
+    // Whether there are pypi dependencies, and we should fetch purls.
+    let has_pypi_dependencies = group.has_pypi_dependencies();
+
+    tokio::spawn(
+        async move {
+            let _permit = concurrency_semaphore
+                .acquire()
+                .await
+                .expect("the semaphore is never closed");
+
+            let pb = SolveProgressBar::new(
+                global_multi_progress().add(ProgressBar::hidden()),
+                platform,
+                group_name.clone(),
+            );
+            pb.start();
+
+            let start = Instant::now();
+
+            // Convert the dependencies into match specs
+            let match_specs = dependencies
+                .iter_specs()
+                .map(|(name, constraint)| {
+                    MatchSpec::from_nameless(constraint.clone(), Some(name.clone()))
+                })
+                .collect_vec();
+
+            // Extract the package names from the dependencies
+            let package_names = dependencies.names().cloned().collect_vec();
+
+            // Extract the repo data records needed to solve the environment.
+            pb.set_message("loading repodata");
+            let available_packages = load_sparse_repo_data_async(
+                package_names.clone(),
+                sparse_repo_data,
+                channels,
+                platform,
+            )
+            .await?;
+
+            // Solve conda packages
+            pb.set_message("resolving conda");
+            let mut records = lock_file::resolve_conda(
+                match_specs,
+                virtual_packages,
+                existing_repodata_records.records.clone(),
+                available_packages,
+            )
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to solve the conda requirements of '{}' '{}'",
+                    group_name.fancy_display(),
+                    consts::PLATFORM_STYLE.apply_to(platform)
+                )
+            })?;
+
+            // Add purls for the conda packages that are also available as pypi packages if we need them.
+            if has_pypi_dependencies {
+                lock_file::pypi::amend_pypi_purls(&mut records).await?;
+            }
+
+            // Turn the records into a map by name
+            let records_by_name = RepoDataRecordsByName::from(records);
+
+            let end = Instant::now();
+
+            // Finish the progress bar
+            pb.finish();
+
+            Ok(TaskResult::CondaGroupSolved(
+                group_name,
+                platform,
+                records_by_name,
+                end - start,
+            ))
+        }
+        .instrument(tracing::info_span!(
+            "resolve_conda",
+            group = %group.name().as_str(),
+            platform = %platform
+        )),
+    )
+    .await
+    .unwrap_or_else(|e| match e.try_into_panic() {
+        Ok(panic) => std::panic::resume_unwind(panic),
+        Err(_err) => Err(miette::miette!("the operation was cancelled")),
+    })
+}
+
+/// Distill the repodata that is applicable for the given `environment` from the repodata of an
+/// entire solve group.
+async fn spawn_extract_conda_environment_task(
+    environment: Environment<'_>,
+    platform: Platform,
+    solve_group_records: impl Future<Output = Arc<RepoDataRecordsByName>>,
+) -> miette::Result<TaskResult> {
+    let group = GroupedEnvironment::from(environment.clone());
+
+    // Await the records from the group
+    let group_records = solve_group_records.await;
+
+    // If the group is just the environment on its own we can immediately return the records.
+    let records = match group {
+        GroupedEnvironment::Environment(_) => {
+            // For a single environment group we can just clone the Arc
+            group_records.clone()
+        }
+        GroupedEnvironment::Group(_) => {
+            let virtual_package_names = group
+                .virtual_packages(platform)
+                .into_iter()
+                .map(|vp| vp.name)
+                .collect::<HashSet<_>>();
+
+            let environment_dependencies = environment.dependencies(None, Some(platform));
+            Arc::new(group_records.subset(
+                environment_dependencies.into_iter().map(|(name, _)| name),
+                &virtual_package_names,
+            ))
+        }
+    };
+
+    Ok(TaskResult::CondaSolved(
+        environment.name().clone(),
+        platform,
+        records,
+    ))
+}
+
+async fn spawn_extract_pypi_environment_task(
+    environment: Environment<'_>,
+    platform: Platform,
+    conda_records: impl Future<Output = Arc<RepoDataRecordsByName>>,
+    solve_group_records: impl Future<Output = Arc<PypiRecordsByName>>,
+) -> miette::Result<TaskResult> {
+    let group = GroupedEnvironment::from(environment.clone());
+    let dependencies = environment.pypi_dependencies(Some(platform));
+
+    let records = match group {
+        GroupedEnvironment::Environment(_) => {
+            // For a single environment group we can just clone the Arc.
+            solve_group_records.await.clone()
+        }
+        GroupedEnvironment::Group(_) => {
+            // Convert all the conda records to package identifiers.
+            let conda_package_identifiers = conda_records
+                .await
+                .records
+                .iter()
+                .filter_map(|record| PypiPackageIdentifier::from_record(record).ok())
+                .flatten()
+                .map(|identifier| (identifier.name.clone().into(), identifier))
+                .collect::<HashMap<_, _>>();
+
+            Arc::new(
+                solve_group_records
+                    .await
+                    .subset(dependencies.into_keys(), &conda_package_identifiers),
+            )
+        }
+    };
+
+    Ok(TaskResult::PypiSolved(
+        environment.name().clone(),
+        platform,
+        records,
+    ))
+}
+
+/// A task that solves the pypi dependencies for a given environment.
+async fn spawn_solve_pypi_task(
+    environment: GroupedEnvironment<'_>,
+    platform: Platform,
+    repodata_records: impl Future<Output = Arc<RepoDataRecordsByName>>,
+    prefix: impl Future<Output = (Prefix, PythonStatus)>,
+    sdist_resolution: SDistResolution,
+) -> miette::Result<TaskResult> {
+    // Get the Pypi dependencies for this environment
+    let dependencies = environment.pypi_dependencies(Some(platform));
+    if dependencies.is_empty() {
+        return Ok(TaskResult::PypiGroupSolved(
+            environment.name().clone(),
+            platform,
+            PypiRecordsByName::default(),
+            Duration::from_millis(0),
+        ));
+    }
+
+    // Get the system requirements for this environment
+    let system_requirements = environment.system_requirements();
+
+    // Get the package database
+    let package_db = environment.project().pypi_package_db()?;
+
+    // Wait until the conda records and prefix are available.
+    let (repodata_records, (prefix, python_status)) = tokio::join!(repodata_records, prefix);
+
+    let environment_name = environment.name().clone();
+    let (pypi_packages, duration) = tokio::spawn(
+        async move {
+            let pb = SolveProgressBar::new(
+                global_multi_progress().add(ProgressBar::hidden()),
+                platform,
+                environment_name,
+            );
+            pb.start();
+
+            let start = Instant::now();
+
+            let records = lock_file::resolve_pypi(
+                package_db,
+                dependencies,
+                system_requirements,
+                &repodata_records.records,
+                &[],
+                platform,
+                &pb.pb,
+                python_status
+                    .location()
+                    .map(|path| prefix.root().join(path))
+                    .as_deref(),
+                sdist_resolution,
+            )
+            .await?;
+
+            let end = Instant::now();
+
+            pb.finish();
+
+            Ok((PypiRecordsByName::from_iter(records), end - start))
+        }
+        .instrument(tracing::info_span!(
+            "resolve_pypi",
+            group = %environment.name().as_str(),
+            platform = %platform
+        )),
+    )
+    .await
+    .unwrap_or_else(|e| match e.try_into_panic() {
+        Ok(panic) => std::panic::resume_unwind(panic),
+        Err(_err) => Err(miette::miette!("the operation was cancelled")),
+    })?;
+
+    Ok(TaskResult::PypiGroupSolved(
+        environment.name().clone(),
+        platform,
+        pypi_packages,
+        duration,
+    ))
+}
+
+/// Updates the prefix for the given environment.
+///
+/// This function will wait until the conda records for the prefix are available.
+async fn spawn_create_prefix_task(
+    group: GroupedEnvironment<'_>,
+    package_cache: Arc<PackageCache>,
+    conda_records: impl Future<Output = Arc<RepoDataRecordsByName>>,
+) -> miette::Result<TaskResult> {
+    let group_name = group.name().clone();
+    let prefix = group.prefix();
+    let client = group.project().authenticated_client().clone();
+
+    // Spawn a task to determine the currently installed packages.
+    let installed_packages_future = tokio::spawn({
+        let prefix = prefix.clone();
+        async move { prefix.find_installed_packages(None).await }
+    })
+    .unwrap_or_else(|e| match e.try_into_panic() {
+        Ok(panic) => std::panic::resume_unwind(panic),
+        Err(_err) => Err(miette::miette!("the operation was cancelled")),
+    });
+
+    // Wait until the conda records are available and until the installed packages for this prefix
+    // are available.
+    let (conda_records, installed_packages) =
+        tokio::try_join!(conda_records.map(Ok), installed_packages_future)?;
+
+    // Spawn a background task to update the prefix
+    let (python_status, duration) = tokio::spawn({
+        let prefix = prefix.clone();
+        let group_name = group_name.clone();
+        async move {
+            let start = Instant::now();
+            let python_status = environment::update_prefix_conda(
+                group_name,
+                &prefix,
+                package_cache,
+                client,
+                installed_packages,
+                &conda_records.records,
+                Platform::current(),
+            )
+            .await?;
+            let end = Instant::now();
+            Ok((python_status, end - start))
+        }
+    })
+    .await
+    .unwrap_or_else(|e| match e.try_into_panic() {
+        Ok(panic) => std::panic::resume_unwind(panic),
+        Err(_err) => Err(miette::miette!("the operation was cancelled")),
+    })?;
+
+    Ok(TaskResult::CondaPrefixUpdated(
+        group_name,
+        prefix,
+        Box::new(python_status),
+        duration,
+    ))
+}
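Aside: the next helper offloads CPU- and IO-heavy repodata loading to tokio's blocking pool. A self-contained sketch of the `spawn_blocking` shape it uses, including flattening the nested `Result` with `identity`:

```rust
use tokio::task::spawn_blocking;

/// Sketch of offloading heavy work (like parsing repodata JSON) to a
/// blocking thread so the async executor threads stay responsive.
async fn load_in_background(paths: Vec<String>) -> Result<usize, String> {
    spawn_blocking(move || {
        // Stand-in for `SparseRepoData::load_records_recursive`.
        Ok(paths.iter().map(|p| p.len()).sum())
    })
    .await
    .map_err(|e| format!("background task failed: {e}"))
    // Flatten Result<Result<_, _>, _> into Result<_, _>.
    .and_then(std::convert::identity)
}

#[tokio::main]
async fn main() {
    let total = load_in_background(vec!["noarch".into(), "linux-64".into()]).await;
    assert_eq!(total, Ok(14));
}
```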
+/// Load the repodata records for the specified platform and package names in the background. This
+/// is a CPU and IO intensive task so we run it in a blocking task to not block the main task.
+pub async fn load_sparse_repo_data_async(
+    package_names: Vec<PackageName>,
+    sparse_repo_data: Arc<IndexMap<(Channel, Platform), SparseRepoData>>,
+    channels: Vec<Channel>,
+    platform: Platform,
+) -> miette::Result<Vec<Vec<RepoDataRecord>>> {
+    tokio::task::spawn_blocking(move || {
+        let sparse = channels
+            .into_iter()
+            .cartesian_product(vec![platform, Platform::NoArch])
+            .filter_map(|target| sparse_repo_data.get(&target));
+
+        // Load only records we need for this platform
+        SparseRepoData::load_records_recursive(sparse, package_names, None).into_diagnostic()
+    })
+    .await
+    .map_err(|e| match e.try_into_panic() {
+        Ok(panic) => std::panic::resume_unwind(panic),
+        Err(_err) => miette::miette!("the operation was cancelled"),
+    })
+    .map_or_else(Err, identity)
+    .with_context(|| {
+        format!(
+            "failed to load repodata records for platform '{}'",
+            platform.as_str()
+        )
+    })
+}
+
+/// A helper struct that manages a progress-bar for solving an environment.
+#[derive(Clone)]
+pub(crate) struct SolveProgressBar {
+    pb: ProgressBar,
+    platform: Platform,
+    environment_name: GroupedEnvironmentName,
+}
+
+impl SolveProgressBar {
+    pub fn new(
+        pb: ProgressBar,
+        platform: Platform,
+        environment_name: GroupedEnvironmentName,
+    ) -> Self {
+        pb.set_style(
+            indicatif::ProgressStyle::with_template(&format!(
+                "  ({:>12}) {:<9} ..",
+                environment_name.fancy_display(),
+                consts::PLATFORM_STYLE.apply_to(platform),
+            ))
+            .unwrap(),
+        );
+        pb.enable_steady_tick(Duration::from_millis(100));
+        Self {
+            pb,
+            platform,
+            environment_name,
+        }
+    }
+
+    pub fn start(&self) {
+        self.pb.reset_elapsed();
+        self.pb.set_style(
+            indicatif::ProgressStyle::with_template(&format!(
+                "  {{spinner:.dim}} {:>12}: {:<9} [{{elapsed_precise}}] {{msg:.dim}}",
+                self.environment_name.fancy_display(),
+                consts::PLATFORM_STYLE.apply_to(self.platform),
+            ))
+            .unwrap(),
+        );
+    }
+
+    pub fn set_message(&self, msg: impl Into<Cow<'static, str>>) {
+        self.pb.set_message(msg);
+    }
+
+    pub fn finish(&self) {
+        self.pb.set_style(
+            indicatif::ProgressStyle::with_template(&format!(
+                "  {} ({:>12}) {:<9} [{{elapsed_precise}}]",
+                console::style(console::Emoji("✔", "↳")).green(),
+                self.environment_name.fancy_display(),
+                consts::PLATFORM_STYLE.apply_to(self.platform),
+            ))
+            .unwrap(),
+        );
+        self.pb.finish_and_clear();
+    }
+}
diff --git a/src/project/grouped_environment.rs b/src/project/grouped_environment.rs
new file mode 100644
index 000000000..1c1a1a515
--- /dev/null
+++ b/src/project/grouped_environment.rs
@@ -0,0 +1,174 @@
+use crate::{
+    consts,
+    prefix::Prefix,
+    project::{
+        manifest::{PyPiRequirement, SystemRequirements},
+        virtual_packages::get_minimal_virtual_packages,
+        Dependencies, Environment, SolveGroup,
+    },
+    EnvironmentName, Project, SpecType,
+};
+use indexmap::{IndexMap, IndexSet};
+use rattler_conda_types::{Channel, GenericVirtualPackage, Platform};
+use std::path::PathBuf;
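Aside: the `GroupedEnvironment` enum defined next deliberately collapses a solve group with a single member into a plain environment, so single-member groups don't pay the grouping overhead. A self-contained sketch of that collapse rule (strings stand in for the real types):

```rust
/// Sketch of the collapse rule implemented by the `From` conversions below:
/// a group with exactly one member behaves like the member itself.
#[derive(Debug, PartialEq)]
enum Grouped {
    Group(Vec<&'static str>),
    Environment(&'static str),
}

fn group_from(mut members: Vec<&'static str>) -> Grouped {
    if members.len() == 1 {
        Grouped::Environment(members.remove(0))
    } else {
        Grouped::Group(members)
    }
}

fn main() {
    // A single-member solve group collapses to the environment itself.
    assert_eq!(group_from(vec!["default"]), Grouped::Environment("default"));
    // Two or more members stay a group and will be solved together.
    assert_eq!(
        group_from(vec!["default", "lint"]),
        Grouped::Group(vec!["default", "lint"])
    );
}
```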
+/// Either a solve group or an individual environment without a solve group.
+///
+/// If a solve group only contains a single environment then it is treated as a single environment,
+/// not as a solve-group.
+///
+/// Construct a `GroupedEnvironment` from a `SolveGroup` or `Environment` using the `From` trait.
+#[derive(Debug, Hash, Eq, PartialEq, Clone)]
+pub enum GroupedEnvironment<'p> {
+    Group(SolveGroup<'p>),
+    Environment(Environment<'p>),
+}
+
+impl<'p> From<SolveGroup<'p>> for GroupedEnvironment<'p> {
+    fn from(source: SolveGroup<'p>) -> Self {
+        let mut envs = source.environments().peekable();
+        let first = envs.next();
+        let second = envs.peek();
+        if second.is_some() {
+            GroupedEnvironment::Group(source)
+        } else if let Some(first) = first {
+            GroupedEnvironment::Environment(first)
+        } else {
+            unreachable!("empty solve group")
+        }
+    }
+}
+
+impl<'p> From<Environment<'p>> for GroupedEnvironment<'p> {
+    fn from(source: Environment<'p>) -> Self {
+        match source.solve_group() {
+            Some(group) if group.environments().len() > 1 => GroupedEnvironment::Group(group),
+            _ => GroupedEnvironment::Environment(source),
+        }
+    }
+}
+
+impl<'p> GroupedEnvironment<'p> {
+    /// Constructs a `GroupedEnvironment` from a `GroupedEnvironmentName`.
+    pub fn from_name(project: &'p Project, name: &GroupedEnvironmentName) -> Option<Self> {
+        match name {
+            GroupedEnvironmentName::Group(g) => {
+                Some(GroupedEnvironment::Group(project.solve_group(g)?))
+            }
+            GroupedEnvironmentName::Environment(env) => {
+                Some(GroupedEnvironment::Environment(project.environment(env)?))
+            }
+        }
+    }
+
+    /// Returns the project to which the group belongs.
+    pub fn project(&self) -> &'p Project {
+        match self {
+            GroupedEnvironment::Group(group) => group.project(),
+            GroupedEnvironment::Environment(env) => env.project(),
+        }
+    }
+
+    /// Returns the prefix of this group.
+    pub fn prefix(&self) -> Prefix {
+        Prefix::new(self.dir())
+    }
+
+    /// Returns the directory where the prefix of this instance is stored.
+    pub fn dir(&self) -> PathBuf {
+        match self {
+            GroupedEnvironment::Group(solve_group) => solve_group.dir(),
+            GroupedEnvironment::Environment(env) => env.dir(),
+        }
+    }
+
+    /// Returns the name of the group.
+    pub fn name(&self) -> GroupedEnvironmentName {
+        match self {
+            GroupedEnvironment::Group(group) => {
+                GroupedEnvironmentName::Group(group.name().to_string())
+            }
+            GroupedEnvironment::Environment(env) => {
+                GroupedEnvironmentName::Environment(env.name().clone())
+            }
+        }
+    }
+
+    /// Returns the dependencies of the group.
+    pub fn dependencies(&self, kind: Option<SpecType>, platform: Option<Platform>) -> Dependencies {
+        match self {
+            GroupedEnvironment::Group(group) => group.dependencies(kind, platform),
+            GroupedEnvironment::Environment(env) => env.dependencies(kind, platform),
+        }
+    }
+
+    /// Returns the pypi dependencies of the group.
+    pub fn pypi_dependencies(
+        &self,
+        platform: Option<Platform>,
+    ) -> IndexMap<rip::types::PackageName, Vec<PyPiRequirement>> {
+        match self {
+            GroupedEnvironment::Group(group) => group.pypi_dependencies(platform),
+            GroupedEnvironment::Environment(env) => env.pypi_dependencies(platform),
+        }
+    }
+
+    /// Returns the system requirements of the group.
+    pub fn system_requirements(&self) -> SystemRequirements {
+        match self {
+            GroupedEnvironment::Group(group) => group.system_requirements(),
+            GroupedEnvironment::Environment(env) => env.system_requirements(),
+        }
+    }
+
+    /// Returns the virtual packages from the group based on the system requirements.
+    pub fn virtual_packages(&self, platform: Platform) -> Vec<GenericVirtualPackage> {
+        get_minimal_virtual_packages(platform, &self.system_requirements())
+            .into_iter()
+            .map(GenericVirtualPackage::from)
+            .collect()
+    }
+
+    /// Returns the channels used for the group.
+    pub fn channels(&self) -> IndexSet<&'p Channel> {
+        match self {
+            GroupedEnvironment::Group(group) => group.channels(),
+            GroupedEnvironment::Environment(env) => env.channels(),
+        }
+    }
+
+    /// Returns true if the group has any PyPi dependencies.
+    pub fn has_pypi_dependencies(&self) -> bool {
+        match self {
+            GroupedEnvironment::Group(group) => group.has_pypi_dependencies(),
+            GroupedEnvironment::Environment(env) => env.has_pypi_dependencies(),
+        }
+    }
+}
+
+/// A name of a [`GroupedEnvironment`].
+#[derive(Clone)]
+pub enum GroupedEnvironmentName {
+    Group(String),
+    Environment(EnvironmentName),
+}
+
+impl GroupedEnvironmentName {
+    /// Returns a fancy display of the name that can be used in the console.
+    pub fn fancy_display(&self) -> console::StyledObject<&str> {
+        match self {
+            GroupedEnvironmentName::Group(name) => {
+                consts::SOLVE_GROUP_STYLE.apply_to(name.as_str())
+            }
+            GroupedEnvironmentName::Environment(name) => name.fancy_display(),
+        }
+    }
+
+    /// Returns the name as a string slice.
+    pub fn as_str(&self) -> &str {
+        match self {
+            GroupedEnvironmentName::Group(group) => group.as_str(),
+            GroupedEnvironmentName::Environment(env) => env.as_str(),
+        }
+    }
+}
diff --git a/src/project/mod.rs b/src/project/mod.rs
index dc76b65f0..0813eb1fb 100644
--- a/src/project/mod.rs
+++ b/src/project/mod.rs
@@ -1,6 +1,7 @@
 mod dependencies;
 mod environment;
 pub mod errors;
+mod grouped_environment;
 pub mod manifest;
 mod solve_group;
 pub mod virtual_packages;
@@ -35,6 +36,7 @@ use url::Url;
 use crate::task::TaskName;
 pub use dependencies::Dependencies;
 pub use environment::Environment;
+pub use grouped_environment::{GroupedEnvironment, GroupedEnvironmentName};
 pub use solve_group::SolveGroup;
 
 /// The dependency types we support
@@ -53,6 +55,7 @@ impl DependencyType {
         }
     }
 }
+
 #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
 /// What kind of dependency spec do we have
 pub enum SpecType {
@@ -219,6 +222,11 @@ impl Project {
         self.pixi_dir().join(consts::ENVIRONMENTS_DIR)
     }
 
+    /// Returns the solve group environments directory.
+    pub fn solve_group_environments_dir(&self) -> PathBuf {
+        self.pixi_dir().join(consts::SOLVE_GROUP_ENVIRONMENTS_DIR)
+    }
+
     /// Returns the path to the manifest file.
     pub fn manifest_path(&self) -> PathBuf {
         self.manifest.path.clone()
diff --git a/src/project/solve_group.rs b/src/project/solve_group.rs
index 9967f2e05..f6dc2228b 100644
--- a/src/project/solve_group.rs
+++ b/src/project/solve_group.rs
@@ -6,6 +6,7 @@ use itertools::{Either, Itertools};
 use rattler_conda_types::{Channel, Platform};
 use std::borrow::Cow;
 use std::hash::Hash;
+use std::path::PathBuf;
 
 /// A grouping of environments that are solved together.
 #[derive(Debug, Clone)]
@@ -44,8 +45,17 @@ impl<'p> SolveGroup<'p> {
         &self.solve_group.name
     }
 
+    /// Returns the directory where this solve group stores its environment.
+    pub fn dir(&self) -> PathBuf {
+        self.project
+            .solve_group_environments_dir()
+            .join(self.name())
+    }
+
     /// Returns an iterator over all the environments that are part of the group.
-    pub fn environments(&self) -> impl Iterator<Item = Environment<'p>> + DoubleEndedIterator + 'p {
+    pub fn environments(
+        &self,
+    ) -> impl Iterator<Item = Environment<'p>> + DoubleEndedIterator + ExactSizeIterator + 'p {
         self.solve_group
             .environments
             .iter()
diff --git a/src/utils/barrier_cell.rs b/src/utils/barrier_cell.rs
index 07c94a6f6..892d003e7 100644
--- a/src/utils/barrier_cell.rs
+++ b/src/utils/barrier_cell.rs
@@ -6,6 +6,12 @@ use std::{
 use thiserror::Error;
 use tokio::sync::Notify;
 
+/// A synchronization primitive that can be used to wait for a value to become available.
+///
+/// The [`BarrierCell`] is initially empty; requesters can wait for a value to become available
+/// using the `wait` method. Once a value is available, it can be stored with the `set` method.
+/// The `set` method can only be called once; calling it again returns an error. When `set` is
+/// called, all waiters are notified.
 pub struct BarrierCell<T> {
     state: AtomicU8,
     value: UnsafeCell<MaybeUninit<T>>,
@@ -13,6 +19,7 @@ pub struct BarrierCell<T> {
 }
 
 unsafe impl<T: Sync> Sync for BarrierCell<T> {}
+
 unsafe impl<T: Send> Send for BarrierCell<T> {}
 
 #[repr(u8)]
@@ -50,7 +57,7 @@
         unsafe { (*self.value.get()).assume_init_ref() }
     }
 
-    /// Wait for a value to become available in the cell or return a writer which
+    /// Sets the value in the cell. If the cell was already initialized, an error is returned.
     pub fn set(&self, value: T) -> Result<(), SetError> {
         let state = self
             .state
@@ -70,6 +77,7 @@
         Ok(())
     }
 
+    /// Consumes this instance and converts it into the inner value if it has been initialized.
    pub fn into_inner(self) -> Option<T> {
        if self.state.load(Ordering::Acquire) == BarrierCellState::Initialized as u8 {
            Some(unsafe { self.value.into_inner().assume_init() })
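// A minimal usage sketch of the `BarrierCell` documented above; illustrative only.
// Assumptions beyond what the diff shows: a `BarrierCell::new()` constructor, `wait()`
// resolving to a reference to the stored value, and a tokio runtime driving the tasks.
async fn barrier_cell_sketch() {
    use std::sync::Arc;

    let cell = Arc::new(BarrierCell::<u32>::new());
    let reader = cell.clone();
    let handle = tokio::spawn(async move {
        // Suspends until another task publishes a value with `set`.
        assert_eq!(*reader.wait().await, 42);
    });

    // `set` may only be called once; a second call returns an error instead of
    // overwriting the stored value, and the first call wakes every waiter.
    cell.set(42).expect("value was already set");
    handle.await.unwrap();
}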
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 68faefd10..28199edd1 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -15,7 +15,7 @@ use pixi::{
         task::{self, AddArgs, AliasArgs},
     },
     consts, EnvironmentName, ExecutableTask, Project, RunOutput, SearchEnvironments, TaskGraph,
-    TaskGraphError, UpdateLockFileOptions,
+    TaskGraphError,
 };
 use rattler_conda_types::{MatchSpec, Platform};
 
@@ -25,6 +25,7 @@ use pixi::cli::LockFileUsageArgs;
 use pixi::task::TaskName;
 use pixi::FeatureName;
 use pixi::TaskExecutionError;
+use pixi::UpdateLockFileOptions;
 use rattler_lock::{LockFile, Package};
 use std::{
     path::{Path, PathBuf},
diff --git a/tests/solve_group_tests.rs b/tests/solve_group_tests.rs
index 913838f51..b38c17366 100644
--- a/tests/solve_group_tests.rs
+++ b/tests/solve_group_tests.rs
@@ -9,7 +9,7 @@ use url::Url;
 mod common;
 
 #[tokio::test]
-async fn add_functionality() {
+async fn conda_solve_group_functionality() {
     let mut package_database = PackageDatabase::default();
 
     // Add a package `foo` with 3 different versions