feat(mm): rename "blake3" to "blake3_multi"
Just make it clearer which is which.
psychedelicious committed Mar 21, 2024
1 parent 9cf03bf commit 6a980e8
Showing 4 changed files with 14 additions and 14 deletions.
6 changes: 3 additions & 3 deletions docs/features/CONFIGURATION.md
@@ -122,18 +122,18 @@ The provided token will be added as a `Bearer` token to the network requests to
Models are hashed during installation, providing a stable identifier for models across all platforms. Hashing is a one-time operation.

```yaml
-hashing_algorithm: blake3_single
+hashing_algorithm: blake3_single # default value
```

You might want to change this setting, depending on your system:

- `blake3_single` (default): Single-threaded - best for spinning HDDs, still OK for SSDs
-- `blake3`: Parallelized, memory-mapped implementation - best for SSDs, terrible for spinning disks
+- `blake3_multi`: Parallelized, memory-mapped implementation - best for SSDs, terrible for spinning disks
- `random`: Skip hashing entirely - fastest but of course no hash

During the first startup after upgrading to v4, all of your models will be hashed. This can take a few minutes.

-Most common algorithms are supported, like `md5`, `sha256`, and `sha512`. These are typically much, much slower than `blake3`.
+Most common algorithms are supported, like `md5`, `sha256`, and `sha512`. These are typically much, much slower than either of the BLAKE3 variants.

#### Path Settings

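As a quick orientation after the rename, here is a minimal `invokeai.yaml` sketch using the parallelized variant. The key and both values are exactly those documented in the section above; which one is faster depends on your disk.

```yaml
# invokeai.yaml (excerpt): algorithm used to hash models at install time
hashing_algorithm: blake3_multi   # parallelized BLAKE3, best on SSDs
# hashing_algorithm: blake3_single  # single-threaded default, better on spinning HDDs
```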
4 changes: 2 additions & 2 deletions invokeai/app/services/config/config_default.py
@@ -115,7 +115,7 @@ class InvokeAIAppConfig(BaseSettings):
allow_nodes: List of nodes to allow. Omit to allow all.
deny_nodes: List of nodes to deny. Omit to deny none.
node_cache_size: How many cached nodes to keep in memory.
-hashing_algorithm: Model hashing algorthim for model installs. 'blake3' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.<br>Valid values: `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`, `blake3`, `blake3_single`, `random`
+hashing_algorithm: Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.<br>Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`
remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
"""

@@ -191,7 +191,7 @@ class InvokeAIAppConfig(BaseSettings):
node_cache_size: int = Field(default=512, description="How many cached nodes to keep in memory.")

# MODEL INSTALL
-hashing_algorithm: HASHING_ALGORITHMS = Field(default="blake3_single", description="Model hashing algorthim for model installs. 'blake3' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.")
+hashing_algorithm: HASHING_ALGORITHMS = Field(default="blake3_single", description="Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.")
remote_api_tokens: Optional[list[URLRegexTokenPair]] = Field(default=None, description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.")

# fmt: on
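To illustrate how the renamed value flows through the settings class above, a minimal sketch follows. Constructing `InvokeAIAppConfig` directly is purely illustrative (the application builds its own config object), but the field name, default, and literal values are taken from the diff:

```python
from invokeai.app.services.config.config_default import InvokeAIAppConfig

# The default is unchanged: single-threaded BLAKE3.
config = InvokeAIAppConfig()
assert config.hashing_algorithm == "blake3_single"

# Illustrative override for SSD-backed model storage; "blake3_multi"
# replaces the old "blake3" literal after this commit.
ssd_config = InvokeAIAppConfig(hashing_algorithm="blake3_multi")
print(ssd_config.hashing_algorithm)  # blake3_multi
```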
10 changes: 5 additions & 5 deletions invokeai/backend/model_hash/model_hash.py
@@ -11,6 +11,9 @@
from invokeai.app.util.misc import uuid_string

HASHING_ALGORITHMS = Literal[
"blake3_multi",
"blake3_single",
"random",
"md5",
"sha1",
"sha224",
@@ -25,9 +28,6 @@
"sha3_512",
"shake_128",
"shake_256",
"blake3",
"blake3_single",
"random",
]
MODEL_FILE_EXTENSIONS = (".ckpt", ".safetensors", ".bin", ".pt", ".pth")

@@ -64,7 +64,7 @@ def __init__(
self, algorithm: HASHING_ALGORITHMS = "blake3_single", file_filter: Optional[Callable[[str], bool]] = None
) -> None:
self.algorithm: HASHING_ALGORITHMS = algorithm
if algorithm == "blake3":
if algorithm == "blake3_multi":
self._hash_file = self._blake3
elif algorithm == "blake3_single":
self._hash_file = self._blake3_single
@@ -226,4 +226,4 @@ def _default_file_filter(file_path: str) -> bool:
def _get_prefix(algorithm: HASHING_ALGORITHMS) -> str:
"""Return the prefix for the given algorithm, e.g. \"blake3:\" or \"md5:\"."""
# blake3_single is a single-threaded version of blake3, prefix should still be "blake3:"
return "blake3:" if algorithm == "blake3_single" else f"{algorithm}:"
return "blake3:" if algorithm == "blake3_single" or algorithm == "blake3_multi" else f"{algorithm}:"
8 changes: 4 additions & 4 deletions tests/test_model_hash.py
@@ -16,7 +16,7 @@
"sha512",
"sha512:c4a10476b21e00042f638ad5755c561d91f2bb599d3504d25409495e1c7eda94543332a1a90fbb4efdaf9ee462c33e0336b5eae4acfb1fa0b186af452dd67dc6",
),
("blake3", "blake3:ce3f0c5f3c05d119f4a5dcaf209b50d3149046a0d3a9adee9fed4c83cad6b4d0"),
("blake3_multi", "blake3:ce3f0c5f3c05d119f4a5dcaf209b50d3149046a0d3a9adee9fed4c83cad6b4d0"),
("blake3_single", "blake3:ce3f0c5f3c05d119f4a5dcaf209b50d3149046a0d3a9adee9fed4c83cad6b4d0"),
]

@@ -29,7 +29,7 @@ def test_model_hash_hashes_file(tmp_path: Path, algorithm: HASHING_ALGORITHMS, e
assert hash_ == expected_hash


@pytest.mark.parametrize("algorithm", ["md5", "sha1", "sha256", "sha512", "blake3", "blake3_single"])
@pytest.mark.parametrize("algorithm", ["md5", "sha1", "sha256", "sha512", "blake3_multi", "blake3_single"])
def test_model_hash_hashes_dir(tmp_path: Path, algorithm: HASHING_ALGORITHMS):
model_hash = ModelHash(algorithm)
files = [Path(tmp_path, f"{i}.bin") for i in range(5)]
@@ -58,7 +58,7 @@ def test_model_hash_hashes_dir(tmp_path: Path, algorithm: HASHING_ALGORITHMS):
("sha1", "sha1:"),
("sha256", "sha256:"),
("sha512", "sha512:"),
("blake3", "blake3:"),
("blake3_multi", "blake3:"),
("blake3_single", "blake3:"),
],
)
@@ -67,7 +67,7 @@ def test_model_hash_gets_prefix(algorithm: HASHING_ALGORITHMS, expected_prefix:


def test_model_hash_blake3_matches_blake3_single(tmp_path: Path):
model_hash = ModelHash("blake3")
model_hash = ModelHash("blake3_multi")
model_hash_simple = ModelHash("blake3_single")

file = tmp_path / "test.bin"
