diff --git a/.github/workflows/testing.yml b/.github/workflows/test.yml
similarity index 84%
rename from .github/workflows/testing.yml
rename to .github/workflows/test.yml
index 6334e3e..ed1412f 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/test.yml
@@ -1,5 +1,5 @@
-run-name: Run Tests
-on: [push]
+name: Tests
+on: [push, pull_request]

 jobs:
@@ -10,7 +10,7 @@ jobs:
     strategy:
       matrix:
-        python-version: ["3.10"]
+        python-version: ["3.8", "3.9", "3.10"]

     steps:
       #----------------------------------------------
@@ -49,5 +49,5 @@ jobs:
       #---- Tests
       #----------------------------------------------
-      - name: Run tests
-        run: poetry run python testing/run_tests.py
+      - name: πŸš€ Run tests with code coverage report
+        run: poetry run pytest --cov=dictdatabase --cov-report term-missing
diff --git a/.gitignore b/.gitignore
index d5778ec..5f68a7b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,7 @@
 .venv/
 .ddb_storage_testing/
+.ddb_pytest_storage
+ddb_storage
 test_db/
 *.prof
 dist/
diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index 6fb0197..0000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-    "python.pythonPath": ".venv/bin/python3",
-}
diff --git a/DictDataBase.code-workspace b/DictDataBase.code-workspace
index 876a149..3f9af16 100644
--- a/DictDataBase.code-workspace
+++ b/DictDataBase.code-workspace
@@ -4,5 +4,7 @@
 			"path": "."
 		}
 	],
-	"settings": {}
-}
\ No newline at end of file
+	"settings": {
+		"python.pythonPath": ".venv/bin/python3"
+	}
+}
diff --git a/README.md b/README.md
index 4e72a25..78700ad 100644
--- a/README.md
+++ b/README.md
@@ -1,106 +1,174 @@
 # DictDataBase
+
+[![Downloads](https://pepy.tech/badge/dictdatabase)](https://pepy.tech/project/dictdatabase)
+[![Downloads](https://pepy.tech/badge/dictdatabase/month)](https://pepy.tech/project/dictdatabase)
+[![Downloads](https://pepy.tech/badge/dictdatabase/week)](https://pepy.tech/project/dictdatabase)

-DictDataBase is a simple but fast and secure database for handling dicts (or PathDicts for more advanced features), that uses json files as the underlying storage mechanism.
-It is also multiprocessind and multithreading safe, due to the employed locking mechanisms.
+DictDataBase is a simple but fast and secure database for handling dicts (or PathDicts for more advanced features) that uses json or compressed json as the underlying storage mechanism. It is:
+- **Multi threading and multi processing safe**. Multiple processes on the same machine can simultaneously read and write to dicts without writes getting lost.
+- **No database server** required. Simply import DictDataBase in your project and use it.
+- **ACID** compliant. Unlike TinyDB, it is suited for concurrent environments.
+- **Fast**. A dict can be accessed partially without having to parse the entire file, making reads and writes very efficient.
+- **Tested** with over 400 test cases.

-## Import
+### Why use DictDataBase
+- For example, if you have a webserver that dispatches database reads and writes concurrently.
+- If spinning up a database server is overkill for your app.
+  - But you still need [ACID](https://en.wikipedia.org/wiki/ACID) guarantees
+- You have a big database and only want to access one key-value pair. DictDataBase can do this efficiently and fast.
+- Your use case is suited for working with json data, or you have to work with a lot of json data.
-```python
-import DictDataBase as DDB
-```
+### Why not DictDataBase
+- If you need document indexes
+- If your use case is better suited for a sql database

-## Configuration
+# Configuration
+There are 5 configuration options:

-There are 3 configuration options.
+### Storage directory
 Set storage_directory to the path of the directory that will contain your database files:
-
 ```python
+
 DDB.config.storage_directory = "./ddb_storage" # Default value
 ```

+### Compression
 If you want to use compressed files, set use_compression to True.
-This will make the db files significantly smaller and might improve performance if your disk is slow.
-However, the files will not be human readable.
+This will make the db files significantly smaller and might improve performance if your disk is slow. However, the files will not be human readable.

 ```python
+
 DDB.config.use_compression = False # Default value
+
 ```

-If you set pretty_json_files to True, the json db files will be indented and the keys will be sorted.
-It won't affect compressed files, since the are not human-readable anyways.
+### Indentation
+Set how written json files should be indented. Behaves exactly like json.dumps(indent=...). It can be an `int` for the number of spaces, the tab character, or `None` if you don't want the files to be indented.

 ```python
-DDB.config.pretty_json_files = True # Default value
+
+DDB.config.indent = "\t" # Default value
+
 ```

-You can specify your own json encoder and decoder if you need to.
+### Sort keys
+Specify if you want the dict keys to be sorted when writing to a file. Behaves exactly like json.dumps(sort_keys=...).
+```python
+
+DDB.config.sort_keys = True # Default value
+
+```
+
+### Use orjson
+You can specify the orjson encoder and decoder if you need to.
 The standard library json module is sufficient most of the time.
-However, alternatives like orjson might be more performant for your use case.
-The encoder function should take a dict and return a str or bytes.
-The decoder function should take a string and return a dict.
+However, orjson is a lot more performant in virtually all cases.

 ```python
-DDB.config.custom_json_encoder = None # Default value
-DDB.config.custom_json_decoder = None # Default value
+
+DDB.config.use_orjson = True # Default value
+
 ```

+# Usage
+## Import

-## Create dicts
-Before you can access dicts, you need to explicitly create them.
+```python
-Do create ones that already exist, this would raise an exception.
-Also do not access ones that do not exist, this will also raise an exception.
+import DictDataBase as DDB
+```
+
+
+## Create dict
+This library is called DictDataBase, but you can actually use any json serializable object.

 ```python
+
 user_data_dict = {
 	"users": {
-		"Ben": {
-			"age": 30,
-			"job": "Software Engineer"
-		},
-		"Sue": {
-			"age": 21:
-			"job": "Student"
-		},
-		"Joe": {
-			"age": 50,
-			"job": "Influencer"
-		}
+		"Ben": { "age": 30, "job": "Software Engineer" },
+		"Sue": { "age": 21, "job": "Student" },
+		"Joe": { "age": 50, "job": "Influencer" }
 	},
 	"follows": [["Ben", "Sue"], ["Joe", "Ben"]]
 })
-DDB.create("user_data", db=user_data_dict)
-# There is now a file called user_data.json (or user_data.ddb if you use compression)
+
+DDB.at("user_data").create(user_data_dict)
+
+# There is now a file called user_data.json
+# (or user_data.ddb if you use compression)
+# in your specified storage directory.
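# Editorial aside (illustrative sketch, not part of this diff): reading the
# file back returns an equal dict no matter which storage format is active,
# since reads fall back to whichever of user_data.json / user_data.ddb exists.
assert DDB.at("user_data").read() == user_data_dict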
``` +## Check if exists + + ## Read dicts + ```python -d = DDB.read("user_data") + +d = DDB.at("user_data").read() # You now have a copy of the dict named "user_data" print(d == user_data_dict) # True + + +# Only partially read Joe +joe = DDB.at("user_data").read("Joe") +print(joe == user_data_dict["Joe"]) + ``` -## Write dicts +## Write dicts ```python + import DictDataBase as DDB -with DDB.session("user_data") as (session, user_data): - # You now have a handle on the dict named "user_data" - # Inside the with statement, the file of user_data will be locked, and no other - # processes will be able to interfere. - user_data["follows"].append(["Sue", "Ben"]) - session.write() - # Now the changes to d are written to the database - -print(DDB.read("user_data")["follows"]) + +with DDB.at("user_data").session() as (session, user_data): + +# You now have a handle on the dict named "user_data" + +# Inside the with statement, the file of user_data will be locked, and no other + +# processes will be able to interfere. + +user_data["follows"].append(["Sue", "Ben"]) + +session.write() + +# Now the changes to d are written to the database + + + +print(DDB.at("user_data").read()["follows"]) + # -> [["Ben", "Sue"], ["Joe", "Ben"], ["Sue", "Ben"]] + ``` If you do not call session.write(), the database file will not be modified. + + +# API Reference + +### at() + +## DDBMethodChooser + +### exists() + +### haskey() (can also be part of exists) + +### create() + +### delete() + +### read() + +### session() diff --git a/cli.sh b/cli.sh new file mode 100755 index 0000000..d56f4f2 --- /dev/null +++ b/cli.sh @@ -0,0 +1,19 @@ +#!/bin/sh +while [ $# -gt 0 ]; do case $1 in + + + --test|-t) + poetry run pytest --cov=dictdatabase --cov-report term-missing + rm ./.coverage + shift ;; + + + + *|-*|--*) + echo "Unknown option $1" + echo "Usage: [ -t | --test ] [ -p | --profiler ]" + exit 2 + exit 1 ;; + + +esac; done diff --git a/dictdatabase/__init__.py b/dictdatabase/__init__.py index 32c1759..f1b5311 100644 --- a/dictdatabase/__init__.py +++ b/dictdatabase/__init__.py @@ -1,3 +1 @@ -from . reading import exists, read, multiread -from . writing import create, delete, session, multisession -from . models import SubModel +from . models import SubModel, at diff --git a/dictdatabase/config.py b/dictdatabase/config.py index d95c9c9..4e438a0 100644 --- a/dictdatabase/config.py +++ b/dictdatabase/config.py @@ -1,7 +1,5 @@ -from typing import Optional, Callable - storage_directory = "./ddb_storage" use_compression = False -pretty_json_files = True -custom_json_encoder: Optional[Callable[[dict], str | bytes]] = None -custom_json_decoder: Optional[Callable[[str], dict]] = None +use_orjson = True +indent = "\t" # eg. "\t" or 4 or None +sort_keys = True diff --git a/dictdatabase/io_safe.py b/dictdatabase/io_safe.py index 2b0ce12..6622a0c 100644 --- a/dictdatabase/io_safe.py +++ b/dictdatabase/io_safe.py @@ -1,6 +1,5 @@ import os -from . locking import ReadLock, WriteLock -from . import config, utils, io_unsafe +from . import config, utils, io_unsafe, locking def read(db_name: str): @@ -14,10 +13,27 @@ def read(db_name: str): if not json_exists and not ddb_exists: return None # Wait in any write lock case, "need" or "has". 
-	lock = ReadLock(db_name)
-	res = io_unsafe.read(db_name)
-	lock.unlock()
-	return res
+	lock = locking.ReadLock(db_name)
+	try:
+		return io_unsafe.read(db_name)
+	except BaseException as e:
+		raise e
+	finally:
+		lock.unlock()
+
+
+def partial_read(db_name: str, key: str):
+	_, json_exists, _, ddb_exists = utils.db_paths(db_name)
+	if not json_exists and not ddb_exists:
+		return None
+	# Wait in any write lock case, "need" or "has".
+	lock = locking.ReadLock(db_name)
+	try:
+		return io_unsafe.partial_read(db_name, key).key_value
+	except BaseException as e:
+		raise e
+	finally:
+		lock.unlock()


 def write(db_name: str, db: dict):
@@ -26,10 +42,13 @@
 	"""
 	dirname = os.path.dirname(f"{config.storage_directory}/{db_name}.any")
 	os.makedirs(dirname, exist_ok=True)
-
-	write_lock = WriteLock(db_name)
-	io_unsafe.write(db_name, db)
-	write_lock.unlock()
+	write_lock = locking.WriteLock(db_name)
+	try:
+		io_unsafe.write(db_name, db)
+	except BaseException as e:
+		raise e
+	finally:
+		write_lock.unlock()


 def delete(db_name: str):
@@ -39,9 +58,13 @@
 	json_path, json_exists, ddb_path, ddb_exists = utils.db_paths(db_name)
 	if not json_exists and not ddb_exists:
 		return None
-	write_lock = WriteLock(db_name)
-	if json_exists:
-		os.remove(json_path)
-	if ddb_exists:
-		os.remove(ddb_path)
-	write_lock.unlock()
+	write_lock = locking.WriteLock(db_name)
+	try:
+		if json_exists:
+			os.remove(json_path)
+		if ddb_exists:
+			os.remove(ddb_path)
+	except BaseException as e:
+		raise e
+	finally:
+		write_lock.unlock()
diff --git a/dictdatabase/io_unsafe.py b/dictdatabase/io_unsafe.py
index 0405790..0edcf6d 100644
--- a/dictdatabase/io_unsafe.py
+++ b/dictdatabase/io_unsafe.py
@@ -1,15 +1,35 @@
-import os
+from __future__ import annotations
+from dataclasses import dataclass
+import orjson
 import json
 import zlib
+import os
 from . import config, utils


-def read(db_name: str) -> dict:
+@dataclass(frozen=True)
+class PartialFileHandle:
+	db_name: str
+	key: str
+	key_value: dict
+	value_start_index: int
+	value_end_index: int
+	original_data_str: str
+	indent_level: int
+
+
+################################################################################
+#### Reading
+################################################################################
+
+
+def read_string(db_name: str) -> str:
 	"""
-	Read the file at db_path from the configured storage directory.
-	Make sure the file exists!
+	Read the content of a db as a string.
+	Reading is always possible, no matter how the config is set.
+	So a compressed ddb file can also be read if compression is disabled,
+	and vice versa.
""" - json_path, json_exists, ddb_path, ddb_exists = utils.db_paths(db_name) if json_exists and ddb_exists: @@ -18,57 +38,131 @@ def read(db_name: str) -> dict: if not json_exists and not ddb_exists: raise FileNotFoundError(f"DB \"{db_name}\" does not exist.") - - data_str: str = None - - # Uncompressed json + # Read from json file if json_exists: with open(json_path, "r") as f: - data_str = f.read() - # Compressed ddb - elif ddb_exists: + return f.read() + # Read from compressed ddb file + if ddb_exists: with open(ddb_path, "rb") as f: data_bytes = f.read() - data_str = zlib.decompress(data_bytes).decode() + return zlib.decompress(data_bytes).decode() + - # Use custom decoder if provided - if config.custom_json_decoder is not None: - return config.custom_json_decoder(data_str) - # Otherwise, use default json decoder - return json.loads(data_str) +def read(db_name: str) -> dict: + """ + Read the file at db_path from the configured storage directory. + Make sure the file exists. If it does notnot a FileNotFoundError is + raised. + """ + data = read_string(db_name) + return orjson.loads(data) if config.use_orjson else json.loads(data) -def write(db_name: str, db: dict): +def partial_read(db_name: str, key: str) -> PartialFileHandle: """ - Write the dict db dumped as a json string - to the file of the db_path. + Partially read a key from a db. + The key MUST be unique in the entire db, otherwise the behavior is undefined. + This is a lot faster than reading the entire db, because it does not parse + the entire file, but only the part part of the : pair. + + If the key is not found, a `KeyError` is raised. """ - json_path, json_exists, ddb_path, ddb_exists = utils.db_paths(db_name) - # Dump db dict as string - db_dump: str | bytes = None + data = read_string(db_name) + key_str = f"\"{key}\":" + key_str_index = utils.find_outermost_key_str_index(data, key_str) - # Use custom encoder if provided - if config.custom_json_encoder is not None and not config.use_compression: - db_dump = config.custom_json_encoder(db) - # Only generate pretty json if compression is disabled - elif config.pretty_json_files and not config.use_compression: - db_dump = json.dumps(db, indent="\t", sort_keys=True) - # Generate compact json - else: - db_dump = json.dumps(db) + if key_str_index == -1: + raise KeyError(f"Key \"{key}\" not found in db \"{db_name}\"") + + # Count the amount of whitespace before the key + # to determine the indentation level + indentation_level = 0 + for i in range(key_str_index-1, -1, -1): + if data[i] not in [" ", "\t"]: + break + indentation_level += 1 + + if isinstance(config.indent, int) and config.indent > 0: + indentation_level //= config.indent + + value_start_index = key_str_index + len(key_str) + value_end_index = utils.seek_index_through_value(data, value_start_index) + + return PartialFileHandle( + db_name=db_name, + key=key, + key_value=json.loads(data[value_start_index:value_end_index]), + value_start_index=value_start_index, + value_end_index=value_end_index, + original_data_str=data, + indent_level=indentation_level, + ) + +################################################################################ +#### Writing +################################################################################ + + +def write_dump(db_name: str, dump: str | bytes): + """ + Write the dump to the file of the db_path. + If the db was compressed but now config.use_compression is False, + remove the compressed file, and vice versa. 
+ """ + json_path, json_exists, ddb_path, ddb_exists = utils.db_paths(db_name) + # Write bytes or string to file if config.use_compression: write_path = ddb_path if json_exists: os.remove(json_path) - db_dump = zlib.compress(db_dump if isinstance(db_dump, bytes) else db_dump.encode(), 1) else: write_path = json_path if ddb_exists: os.remove(ddb_path) + if config.use_compression: + dump = zlib.compress(dump if isinstance(dump, bytes) else dump.encode(), 1) + # Write bytes or string to file - open_mode = "wb" if isinstance(db_dump, bytes) else "w" + open_mode = "wb" if isinstance(dump, bytes) else "w" with open(write_path, open_mode) as f: - f.write(db_dump) + f.write(dump) + + +def write(db_name: str, db: dict): + """ + Write the dict db dumped as a json string + to the file of the db_path. + """ + if config.use_orjson: + orjson_indent = orjson.OPT_INDENT_2 if config.indent else 0 + orjson_sort_keys = orjson.OPT_SORT_KEYS if config.sort_keys else 0 + db_dump = orjson.dumps(db, option=orjson_indent | orjson_sort_keys) + else: + db_dump = json.dumps(db, indent=config.indent, sort_keys=config.sort_keys) + + write_dump(db_name, db_dump) + + +def partial_write(pf: PartialFileHandle): + """ + Write a partial file handle to the db. + """ + if config.use_orjson: + orjson_indent = orjson.OPT_INDENT_2 if config.indent else 0 + orjson_sort_keys = orjson.OPT_SORT_KEYS if config.sort_keys else 0 + partial_dump = orjson.dumps(pf.key_value, option=orjson_indent | orjson_sort_keys) + partial_dump = partial_dump.decode() + else: + partial_dump = json.dumps(pf.key_value, indent=config.indent, sort_keys=config.sort_keys) + + if config.indent is not None: + indent_with = " " * config.indent if isinstance(config.indent, int) else config.indent + partial_dump = partial_dump.replace("\n", "\n" + (pf.indent_level * indent_with)) + + dump_start = pf.original_data_str[:pf.value_start_index] + dump_end = pf.original_data_str[pf.value_end_index:] + write_dump(pf.db_name, f"{dump_start} {partial_dump}{dump_end}") diff --git a/dictdatabase/locking.py b/dictdatabase/locking.py index b86e969..096dc79 100644 --- a/dictdatabase/locking.py +++ b/dictdatabase/locking.py @@ -1,3 +1,4 @@ +from __future__ import annotations import threading import time from pathlib import Path @@ -87,7 +88,8 @@ def __init__(self, db_name): # Except if current thread already has a read lock if check_if_lock_exists(db_name, self.id, "hasread"): - raise RuntimeError("Thread already has a read lock.") + need_read_path.unlink() + raise RuntimeError("Thread already has a read lock. Do not try to obtain a read lock twice.") # Make path of the hyptoetical hasread lock self.path = Path(path_str(db_name, self.id, self.time_ns, "hasread")) @@ -120,7 +122,8 @@ def __init__(self, db_name): # Except if current thread already has a write lock if check_if_lock_exists(db_name, self.id, "haswrite"): - raise RuntimeError("Thread already has a write lock. Do not open sessions while already in a session.") + need_write_path.unlink() + raise RuntimeError("Thread already has a write lock. Do try to obtain a write lock twice.") # Make path of the hyptoetical haswrite lock self.path = Path(path_str(db_name, self.id, self.time_ns, "haswrite")) diff --git a/dictdatabase/models.py b/dictdatabase/models.py index 3f91566..535cc60 100644 --- a/dictdatabase/models.py +++ b/dictdatabase/models.py @@ -1,6 +1,7 @@ from __future__ import annotations from path_dict import PathDict -from . import utils, io_safe, writing +from . import utils, io_safe +from . 

 class SubModel(PathDict):
@@ -22,10 +23,8 @@ def __init__(self, key: str, initial_value=None):
 		else:
 			raise ValueError("If provided, initial_value must be a dict or PathDict")
-
 	def session(self):
-		return writing.DDBSession(self.db_name, as_PathDict=True)
-
+		return DDBSession(self.db_name, as_PathDict=True)

 	def read(self):
 		"""
@@ -36,3 +35,106 @@ def read(self):
 			return None
 		self.data = self.file_db.get(self.key, None)
 		return self
+
+
+def at(*path):
+	return DDBMethodChooser(*path)
+
+
+class DDBMethodChooser:
+	def __init__(self, *path):
+		if len(path) > 1:
+			self.path = utils.to_path_str("/".join(path))
+		else:
+			self.path = utils.to_path_str(*path)
+
+	def exists(self) -> bool:
+		"""
+		Efficiently checks if a database exists.
+		If it contains a wildcard, it will return True if at least one exists.
+		"""
+		return len(utils.find(self.path)) > 0
+
+	def haskey(self, key: str) -> bool:
+		"""
+		Checks if a key exists in a database.
+		The key can be anywhere in the database, even deeply nested.
+		As long as it exists as a key in any dict, it will be found.
+		"""
+		try:
+			io_safe.subread(self.path, key=key)
+			return True
+		except KeyError:
+			return False
+
+	def create(self, db=None, force_overwrite=False):
+		"""
+		It creates a database file at the given path, and writes the given database to
+		it
+
+		:param db: The database to create. If not specified, an empty database is
+		created.
+		:param force_overwrite: If True, will overwrite the database if it already
+		exists, defaults to False (optional).
+		"""
+		# Except if db exists and force_overwrite is False
+		if not force_overwrite and self.exists():
+			raise FileExistsError(f"Database {self.path} already exists. Pass force_overwrite=True to overwrite.")
+		# Write db to file
+		if db is None:
+			db = {}
+		data = db.dict if isinstance(db, PathDict) else db
+		io_safe.write(self.path, data)
+
+	def delete(self):
+		"""
+		Delete the database at the selected path.
+		"""
+		io_safe.delete(self.path)
+
+	def read(self, key: str = None, as_PathDict: bool = False) -> dict | PathDict:
+		"""
+		Reads a database and returns it as a PathDict.
+		If a key is given, return the efficiently read key value.
+
+		Multiread reads multiple dbs and returns them as a single dict or PathDict.
+		Path components can be "*" (all), a specific name of a list (only those from list).
+
+		Subread reads a database and returns the partial value.
+		"""
+		if key is not None:
+			if "*" in key:
+				raise ValueError("A key cannot be specified with a wildcard.")
+			# subread
+			_, json_exists, _, ddb_exists = utils.db_paths(self.path)
+			if not json_exists and not ddb_exists:
+				return None
+			# Wait in any write lock case, "need" or "has".
+			data = io_safe.partial_read(self.path, key)
+			return PathDict(data) if as_PathDict else data
+		if "*" in self.path:
+			# multiread
+			pattern_paths = utils.expand_find_path_pattern(self.path)
+			res = {db_name: io_safe.read(db_name) for db_name in pattern_paths}
+			return PathDict(res) if as_PathDict else res
+		else:
+			# Normal read
+			db = io_safe.read(self.path)
+			return PathDict(db) if as_PathDict else db
+
+	def session(self, key: str = None, as_PathDict: bool = False) -> DDBSession | DDBMultiSession | DDBSubSession:
+		"""
+
+		Open multiple files at once using a glob pattern, like "user/*".
+		Multiple arguments are allowed to access folders,
+		so session(f"users/{user_id}") is equivalent
+		to session("users", user_id).
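For orientation, here is a usage sketch of the DDB.at() entry point defined above. The db name "user_data" and its keys are hypothetical; the methods and signatures are the ones shown in this file.

```python
import dictdatabase as DDB

if not DDB.at("user_data").exists():
    DDB.at("user_data").create({"users": {"Ben": {"age": 30}}})

ben = DDB.at("user_data").read(key="Ben")    # efficient partial read of one key
everything = DDB.at("user_data").read()      # full read

with DDB.at("user_data").session(key="Ben") as (session, ben):
    ben["age"] += 1
    session.write()                          # partial write of only this key
```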
+ """ + if key is not None and "*" in key: + raise ValueError("A key cannot be specified with a wildcard.") + if key is not None: + return DDBSubSession(self.path, key, as_PathDict=as_PathDict) + elif "*" in self.path: + return DDBMultiSession(self.path, as_PathDict=as_PathDict) + else: + return DDBSession(self.path, as_PathDict=as_PathDict) diff --git a/dictdatabase/reading.py b/dictdatabase/reading.py deleted file mode 100644 index eb33727..0000000 --- a/dictdatabase/reading.py +++ /dev/null @@ -1,25 +0,0 @@ -from path_dict import PathDict -from . import utils, io_safe - - -def exists(*name) -> bool: - """ - Efficiently checks if a database exists. - If it contains a wildcard, it will return True if at least one exists. - """ - return len(utils.find(utils.to_path_str(name))) > 0 - - -def read(*name, as_PathDict: bool = False) -> dict | PathDict: - db = io_safe.read(utils.to_path_str(name)) - return PathDict(db) if as_PathDict else db - - -def multiread(*pattern, as_PathDict: bool = False): - """ - Mutliread reads multiple dbs and returns them as a single dict or PathDict. - Path components can be "*" (all), a specific name of a list (only those from list). - """ - pattern_paths = utils.expand_find_path_pattern(pattern) - res = {db_name: io_safe.read(db_name) for db_name in pattern_paths} - return PathDict(res) if as_PathDict else res diff --git a/dictdatabase/writing.py b/dictdatabase/sessions.py similarity index 58% rename from dictdatabase/writing.py rename to dictdatabase/sessions.py index 5461d23..bb88f2c 100644 --- a/dictdatabase/writing.py +++ b/dictdatabase/sessions.py @@ -1,33 +1,7 @@ +from __future__ import annotations +from typing import Tuple from path_dict import PathDict -from . import utils, io_unsafe, io_safe, reading -from . locking import WriteLock - - -def create(*name, db=None, force_overwrite=False): - """ - It creates a database file at the given path, and writes the given database to - it - - :param db: The database to create. If not specified, an empty database is - created. - :param force_overwrite: If True, will overwrite the database if it already - exists, defaults to False (optional). - """ - db_name = utils.to_path_str(name) - # Except if db exists and force_overwrite is False - if not force_overwrite and reading.exists(db_name): - raise FileExistsError(f"Database {db_name} already exists. Pass force_overwrite=True to DDB.create() to overwrite.") - # Write db to file - db = db or {} - data = db.dict if isinstance(db, PathDict) else db - io_safe.write(db_name, data) - - -def delete(*name): - """ - Delete the database with the given name. - """ - io_safe.delete(utils.to_path_str(name)) +from . import utils, io_unsafe, locking class DDBSession(object): @@ -44,7 +18,7 @@ def __init__(self, db_name: str, as_PathDict: bool = False): self.as_PathDict = as_PathDict self.in_session = False - def __enter__(self): + def __enter__(self) -> Tuple("DDBSession", dict | PathDict): """ Any number of read tasks can be carried out in parallel. Each read task creates a read lock while reading, to signal that it is reading. @@ -53,15 +27,15 @@ def __enter__(self): No new read tasks will be allowed. When all read tasks are done, the session aquire the write lock. Now, it can savely read and write while all other tasks wait. 
""" - self.write_lock = WriteLock(self.db_name) + self.write_lock = locking.WriteLock(self.db_name) self.in_session = True try: self.dict = io_unsafe.read(self.db_name) if self.as_PathDict: self.dict = PathDict(self.dict) - except BaseException: + except BaseException as e: self.write_lock.unlock() - raise + raise e return self, self.dict def __exit__(self, type, value, tb): @@ -76,9 +50,6 @@ def write(self): io_unsafe.write(self.db_name, data) -def session(*name, as_PathDict: bool = False): - return DDBSession(utils.to_path_str(name), as_PathDict=as_PathDict) - class DDBMultiSession(object): def __init__(self, pattern: str, as_PathDict: bool = False): @@ -86,17 +57,17 @@ def __init__(self, pattern: str, as_PathDict: bool = False): self.as_PathDict = as_PathDict self.in_session = False - def __enter__(self): - self.write_locks = [WriteLock(x) for x in self.db_names] + def __enter__(self) -> Tuple("DDBMultiSession", dict | PathDict): + self.write_locks = [locking.WriteLock(x) for x in self.db_names] self.in_session = True try: self.dicts = {n: io_unsafe.read(n) for n in self.db_names} if self.as_PathDict: self.dicts = PathDict(self.dicts) - except BaseException: + except BaseException as e: for write_lock in self.write_locks: write_lock.unlock() - raise + raise e return self, self.dicts def __exit__(self, type, value, tb): @@ -113,11 +84,33 @@ def write(self): io_unsafe.write(db_name, data) -def multisession(*pattern, as_PathDict: bool = False): - """ - Open multiple files at once using a glob pattern, like "user*". - Mutliple arguments are allowed to access folders, - so multisession(f"users/{user_id}") is equivalent - to multisession("users", user_id). - """ - return DDBMultiSession(utils.to_path_str(pattern), as_PathDict=as_PathDict) +class DDBSubSession(object): + def __init__(self, db_name: str, key: str, as_PathDict: bool = False): + self.db_name = db_name + self.key = key + self.as_PathDict = as_PathDict + self.in_session = False + + def __enter__(self) -> Tuple("DDBSubSession", dict | PathDict): + self.write_lock = locking.WriteLock(self.db_name) + self.in_session = True + try: + self.partial_handle = io_unsafe.partial_read(self.db_name, self.key) + if self.as_PathDict: + self.dict = PathDict(self.partial_handle.key_value) + else: + self.dict = self.partial_handle.key_value + except BaseException as e: + self.write_lock.unlock() + raise e + return self, self.dict + + def __exit__(self, type, value, tb): + self.write_lock.unlock() + self.write_lock = None + self.in_session = False + + def write(self): + if not self.in_session: + raise PermissionError("Only call write() inside a with statement.") + io_unsafe.partial_write(self.partial_handle) diff --git a/dictdatabase/utils.py b/dictdatabase/utils.py index f7bfb0d..54ede4a 100644 --- a/dictdatabase/utils.py +++ b/dictdatabase/utils.py @@ -1,6 +1,7 @@ +from __future__ import annotations +from typing import Tuple import os import glob -from typing import Tuple from . import config @@ -42,16 +43,95 @@ def find(*pattern) -> list[str]: return dbs_all -def expand_find_path_pattern(pattern): +def expand_find_path_pattern(path): """ :param str pattern: The pattern to expand. Fot a tuple of path items, expand it to a list of all real paths. An item can be some string, a wildcard "*" or a list to select specific paths. 
""" res = [[]] - for item in pattern: + for item in path.split("/"): if isinstance(item, str): res = [r + [item] for r in res] if isinstance(item, list): res = [r + [list_item] for list_item in item for r in res] - return [f for r in res for f in find(r)] + return [f for r in res for f in find(*r)] + + +def seek_index_through_value(data: str, index: int) -> int: + """ + Finds the index of the next comma or closing bracket/brace, but only if + it is at the same indentation level as at the start index. + + :param data: The string to be parsed + :param index: the index of the first character of the value + """ + in_str, list_depth, dict_depth = False, 0, 0 + + d_prev, d_curr = None, data[index - 1] + for i in range(index, len(data)): + d_prev, d_curr = d_curr, data[i] + prev_backslash = d_prev == "\\" + if d_curr == '"' and not prev_backslash: + in_str = not in_str + continue + if in_str or d_curr == " " or prev_backslash: + continue + if d_curr == "[": + list_depth += 1 + elif d_curr == "]": + list_depth -= 1 + elif d_curr == "{": + dict_depth += 1 + elif d_curr == "}": + dict_depth -= 1 + if list_depth == 0 and dict_depth == 0: + return i + 1 + + +def count_nesting(data: str, start: int, end: int) -> int: + """ + Returns the number of nesting levels between the start and end indices. + + :param data: The string to be parsed + """ + in_str, nesting = False, 0 + + d_prev, d_curr = None, data[start - 1] + for i in range(start, end): + d_prev, d_curr = d_curr, data[i] + prev_backslash = d_prev == "\\" + if d_curr == '"' and not prev_backslash: + in_str = not in_str + continue + if in_str: + continue + elif d_curr == "{": + nesting += 1 + elif d_curr == "}": + nesting -= 1 + return nesting + + +def find_outermost_key_str_index(data: str, key_str: str): + """ + Returns the index of the key_str that is at the outermost nesting level. 
+ """ + if (curr_i := data.find(key_str, 0)) == -1: + return -1 + + key_nest = [(curr_i, 0)] # (key, nesting) + + while (next_i := data.find(key_str, curr_i + len(key_str))) != -1: + nesting = count_nesting(data, curr_i + len(key_str), next_i) + key_nest.append((next_i, nesting)) + curr_i = next_i + + # Early exit if there is only one key + if len(key_nest) == 1: + return key_nest[0][0] + + # Relative to total nesting + for i in range(1, len(key_nest)): + key_nest[i] = (key_nest[i][0], key_nest[i - 1][1] + key_nest[i][1]) + return min(key_nest, key=lambda x: x[1])[0] diff --git a/poetry.lock b/poetry.lock index 61c7efc..5f881eb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,5 +1,19 @@ [[package]] -name = "Brotli" +name = "attrs" +version = "22.1.0" +description = "Classes Without Boilerplate" +category = "dev" +optional = false +python-versions = ">=3.5" + +[package.extras] +dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"] +docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"] +tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"] +tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"] + +[[package]] +name = "brotli" version = "1.0.9" description = "Python bindings for the Brotli compression library" category = "main" @@ -7,32 +21,54 @@ optional = false python-versions = "*" [[package]] -name = "cloudpickle" -version = "2.2.0" -description = "Extended pickling support for Python objects" +name = "colorama" +version = "0.4.5" +description = "Cross-platform colored terminal text." 
category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] -name = "commonmark" -version = "0.9.1" -description = "Python parser for the CommonMark Markdown spec" +name = "coverage" +version = "6.5.0" +description = "Code coverage measurement for Python" category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.7" + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] -test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] +toml = ["tomli"] + +[[package]] +name = "iniconfig" +version = "1.1.1" +description = "iniconfig: brain-dead simple config-ini parsing" +category = "dev" +optional = false +python-versions = "*" [[package]] name = "orjson" version = "3.8.0" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" -category = "dev" +category = "main" optional = false python-versions = ">=3.7" +[[package]] +name = "packaging" +version = "21.3" +description = "Core utilities for Python packages" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" + [[package]] name = "path-dict" version = "1.2.6" @@ -41,6 +77,26 @@ category = "main" optional = false python-versions = ">=3.8,<4.0" +[[package]] +name = "pluggy" +version = "1.0.0" +description = "plugin and hook calling mechanisms for python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "py" +version = "1.11.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + [[package]] name = "pycodestyle" version = "2.9.1" @@ -50,51 +106,61 @@ optional = false python-versions = ">=3.6" [[package]] -name = "pygments" -version = "2.13.0" -description = "Pygments is a syntax highlighting package written in Python." +name = "pyinstrument" +version = "4.3.0" +description = "Call stack profiler for Python. Shows you why your code is slow!" 
category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.extras] -plugins = ["importlib-metadata"] +jupyter = ["ipython"] [[package]] -name = "pynvml" -version = "11.4.1" -description = "Python Bindings for the NVIDIA Management Library" +name = "pyparsing" +version = "3.0.9" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.6.8" + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] [[package]] -name = "rich" -version = "12.6.0" -description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +name = "pytest" +version = "7.1.3" +description = "pytest: simple powerful testing with Python" category = "dev" optional = false -python-versions = ">=3.6.3,<4.0.0" +python-versions = ">=3.7" [package.dependencies] -commonmark = ">=0.9.0,<0.10.0" -pygments = ">=2.6.0,<3.0.0" +attrs = ">=19.2.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +py = ">=1.8.2" +tomli = ">=1.0.0" [package.extras] -jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] [[package]] -name = "scalene" -version = "1.5.13" -description = "Scalene: A high-resolution, low-overhead CPU, GPU, and memory profiler for Python" +name = "pytest-cov" +version = "4.0.0" +description = "Pytest plugin for measuring coverage." category = "dev" optional = false -python-versions = ">=3.8" +python-versions = ">=3.6" [package.dependencies] -cloudpickle = ">=1.5.0" -pynvml = ">=11.0.0" -rich = ">=9.2.0" +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] [[package]] name = "snakeviz" @@ -115,6 +181,14 @@ category = "dev" optional = false python-versions = ">=3.8,<4.0" +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.7" + [[package]] name = "tornado" version = "6.2" @@ -125,11 +199,15 @@ python-versions = ">= 3.7" [metadata] lock-version = "1.1" -python-versions = "^3.10" -content-hash = "0b27cdf56a389cd08f03035901599f976665ad854f1f2f812c19a16d40833dad" +python-versions = "^3.8" +content-hash = "f3f13fab1f4bbd80516b4f80e2066661af99029bf6f8408373baa326e4f790be" [metadata.files] -Brotli = [ +attrs = [ + {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"}, + {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"}, +] +brotli = [ {file = "Brotli-1.0.9-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:268fe94547ba25b58ebc724680609c8ee3e5a843202e9a381f6f9c5e8bdb5c70"}, {file = "Brotli-1.0.9-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:c2415d9d082152460f2bd4e382a1e85aed233abc92db5a3880da2257dc7daf7b"}, {file = "Brotli-1.0.9-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5913a1177fc36e30fcf6dc868ce23b0453952c78c04c266d3149b3d39e1410d6"}, @@ -193,13 +271,65 @@ Brotli = [ {file = "Brotli-1.0.9-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:76ffebb907bec09ff511bb3acc077695e2c32bc2142819491579a695f77ffd4d"}, {file = "Brotli-1.0.9.zip", hash = 
"sha256:4d1b810aa0ed773f81dceda2cc7b403d01057458730e309856356d4ef4188438"}, ] -cloudpickle = [ - {file = "cloudpickle-2.2.0-py3-none-any.whl", hash = "sha256:7428798d5926d8fcbfd092d18d01a2a03daf8237d8fcdc8095d256b8490796f0"}, - {file = "cloudpickle-2.2.0.tar.gz", hash = "sha256:3f4219469c55453cfe4737e564b67c2a149109dabf7f242478948b895f61106f"}, +colorama = [ + {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, + {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, ] -commonmark = [ - {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, - {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, +coverage = [ + {file = "coverage-6.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ef8674b0ee8cc11e2d574e3e2998aea5df5ab242e012286824ea3c6970580e53"}, + {file = "coverage-6.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:784f53ebc9f3fd0e2a3f6a78b2be1bd1f5575d7863e10c6e12504f240fd06660"}, + {file = "coverage-6.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4a5be1748d538a710f87542f22c2cad22f80545a847ad91ce45e77417293eb4"}, + {file = "coverage-6.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83516205e254a0cb77d2d7bb3632ee019d93d9f4005de31dca0a8c3667d5bc04"}, + {file = "coverage-6.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af4fffaffc4067232253715065e30c5a7ec6faac36f8fc8d6f64263b15f74db0"}, + {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:97117225cdd992a9c2a5515db1f66b59db634f59d0679ca1fa3fe8da32749cae"}, + {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a1170fa54185845505fbfa672f1c1ab175446c887cce8212c44149581cf2d466"}, + {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:11b990d520ea75e7ee8dcab5bc908072aaada194a794db9f6d7d5cfd19661e5a"}, + {file = "coverage-6.5.0-cp310-cp310-win32.whl", hash = "sha256:5dbec3b9095749390c09ab7c89d314727f18800060d8d24e87f01fb9cfb40b32"}, + {file = "coverage-6.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:59f53f1dc5b656cafb1badd0feb428c1e7bc19b867479ff72f7a9dd9b479f10e"}, + {file = "coverage-6.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4a5375e28c5191ac38cca59b38edd33ef4cc914732c916f2929029b4bfb50795"}, + {file = "coverage-6.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4ed2820d919351f4167e52425e096af41bfabacb1857186c1ea32ff9983ed75"}, + {file = "coverage-6.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33a7da4376d5977fbf0a8ed91c4dffaaa8dbf0ddbf4c8eea500a2486d8bc4d7b"}, + {file = "coverage-6.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8fb6cf131ac4070c9c5a3e21de0f7dc5a0fbe8bc77c9456ced896c12fcdad91"}, + {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a6b7d95969b8845250586f269e81e5dfdd8ff828ddeb8567a4a2eaa7313460c4"}, + {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1ef221513e6f68b69ee9e159506d583d31aa3567e0ae84eaad9d6ec1107dddaa"}, + {file = 
"coverage-6.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cca4435eebea7962a52bdb216dec27215d0df64cf27fc1dd538415f5d2b9da6b"}, + {file = "coverage-6.5.0-cp311-cp311-win32.whl", hash = "sha256:98e8a10b7a314f454d9eff4216a9a94d143a7ee65018dd12442e898ee2310578"}, + {file = "coverage-6.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:bc8ef5e043a2af066fa8cbfc6e708d58017024dc4345a1f9757b329a249f041b"}, + {file = "coverage-6.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4433b90fae13f86fafff0b326453dd42fc9a639a0d9e4eec4d366436d1a41b6d"}, + {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4f05d88d9a80ad3cac6244d36dd89a3c00abc16371769f1340101d3cb899fc3"}, + {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:94e2565443291bd778421856bc975d351738963071e9b8839ca1fc08b42d4bef"}, + {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:027018943386e7b942fa832372ebc120155fd970837489896099f5cfa2890f79"}, + {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:255758a1e3b61db372ec2736c8e2a1fdfaf563977eedbdf131de003ca5779b7d"}, + {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:851cf4ff24062c6aec510a454b2584f6e998cada52d4cb58c5e233d07172e50c"}, + {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:12adf310e4aafddc58afdb04d686795f33f4d7a6fa67a7a9d4ce7d6ae24d949f"}, + {file = "coverage-6.5.0-cp37-cp37m-win32.whl", hash = "sha256:b5604380f3415ba69de87a289a2b56687faa4fe04dbee0754bfcae433489316b"}, + {file = "coverage-6.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4a8dbc1f0fbb2ae3de73eb0bdbb914180c7abfbf258e90b311dcd4f585d44bd2"}, + {file = "coverage-6.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d900bb429fdfd7f511f868cedd03a6bbb142f3f9118c09b99ef8dc9bf9643c3c"}, + {file = "coverage-6.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2198ea6fc548de52adc826f62cb18554caedfb1d26548c1b7c88d8f7faa8f6ba"}, + {file = "coverage-6.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c4459b3de97b75e3bd6b7d4b7f0db13f17f504f3d13e2a7c623786289dd670e"}, + {file = "coverage-6.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20c8ac5386253717e5ccc827caad43ed66fea0efe255727b1053a8154d952398"}, + {file = "coverage-6.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b07130585d54fe8dff3d97b93b0e20290de974dc8177c320aeaf23459219c0b"}, + {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dbdb91cd8c048c2b09eb17713b0c12a54fbd587d79adcebad543bc0cd9a3410b"}, + {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:de3001a203182842a4630e7b8d1a2c7c07ec1b45d3084a83d5d227a3806f530f"}, + {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e07f4a4a9b41583d6eabec04f8b68076ab3cd44c20bd29332c6572dda36f372e"}, + {file = "coverage-6.5.0-cp38-cp38-win32.whl", hash = "sha256:6d4817234349a80dbf03640cec6109cd90cba068330703fa65ddf56b60223a6d"}, + {file = "coverage-6.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:7ccf362abd726b0410bf8911c31fbf97f09f8f1061f8c1cf03dfc4b6372848f6"}, + {file = "coverage-6.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:633713d70ad6bfc49b34ead4060531658dc6dfc9b3eb7d8a716d5873377ab745"}, + 
{file = "coverage-6.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:95203854f974e07af96358c0b261f1048d8e1083f2de9b1c565e1be4a3a48cfc"}, + {file = "coverage-6.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9023e237f4c02ff739581ef35969c3739445fb059b060ca51771e69101efffe"}, + {file = "coverage-6.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:265de0fa6778d07de30bcf4d9dc471c3dc4314a23a3c6603d356a3c9abc2dfcf"}, + {file = "coverage-6.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f830ed581b45b82451a40faabb89c84e1a998124ee4212d440e9c6cf70083e5"}, + {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7b6be138d61e458e18d8e6ddcddd36dd96215edfe5f1168de0b1b32635839b62"}, + {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42eafe6778551cf006a7c43153af1211c3aaab658d4d66fa5fcc021613d02518"}, + {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:723e8130d4ecc8f56e9a611e73b31219595baa3bb252d539206f7bbbab6ffc1f"}, + {file = "coverage-6.5.0-cp39-cp39-win32.whl", hash = "sha256:d9ecf0829c6a62b9b573c7bb6d4dcd6ba8b6f80be9ba4fc7ed50bf4ac9aecd72"}, + {file = "coverage-6.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc2af30ed0d5ae0b1abdb4ebdce598eafd5b35397d4d75deb341a614d333d987"}, + {file = "coverage-6.5.0-pp36.pp37.pp38-none-any.whl", hash = "sha256:1431986dac3923c5945271f169f59c45b8802a114c8f548d611f2015133df77a"}, + {file = "coverage-6.5.0.tar.gz", hash = "sha256:f642e90754ee3e06b0e7e51bce3379590e76b7f76b708e1a71ff043f87025c84"}, +] +iniconfig = [ + {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, + {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, ] orjson = [ {file = "orjson-3.8.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:9a93850a1bdc300177b111b4b35b35299f046148ba23020f91d6efd7bf6b9d20"}, @@ -245,39 +375,89 @@ orjson = [ {file = "orjson-3.8.0-cp39-none-win_amd64.whl", hash = "sha256:2058653cc12b90e482beacb5c2d52dc3d7606f9e9f5a52c1c10ef49371e76f52"}, {file = "orjson-3.8.0.tar.gz", hash = "sha256:fb42f7cf57d5804a9daa6b624e3490ec9e2631e042415f3aebe9f35a8492ba6c"}, ] +packaging = [ + {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, + {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, +] path-dict = [ {file = "path_dict-1.2.6-py3-none-any.whl", hash = "sha256:8fe7af2f3ead048f42c621f2dbff341e4219b7c70adc12499b161a3e47f71f5b"}, {file = "path_dict-1.2.6.tar.gz", hash = "sha256:ac144ca3182ea411a79bae4a367df5237fae6ed502bae14da7cfb57ea412f8b5"}, ] +pluggy = [ + {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, +] +py = [ + {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, + {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, +] pycodestyle = [ {file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = 
"sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"}, {file = "pycodestyle-2.9.1.tar.gz", hash = "sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785"}, ] -pygments = [ - {file = "Pygments-2.13.0-py3-none-any.whl", hash = "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"}, - {file = "Pygments-2.13.0.tar.gz", hash = "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1"}, +pyinstrument = [ + {file = "pyinstrument-4.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a641c1c37dd9259162b76c28b2a846f66d5b2c91825c8eaf536329c127855e01"}, + {file = "pyinstrument-4.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4ce24e16d3c4aae641186bd9c2540526642e6b6f181fb41f0fc6ecbe488cb3fb"}, + {file = "pyinstrument-4.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0b795db84fa066ff3502695cfe9a15dbda62c2e2ad66c27b6e12c7f96a2ca07"}, + {file = "pyinstrument-4.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425bd0a3d5536d080b3f4b6f31707c9402a736209c9cc2decd930583ef858112"}, + {file = "pyinstrument-4.3.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6672bd073ecfd1304b4908742f447fd005acacd29746d7d31c1413bee9ac453d"}, + {file = "pyinstrument-4.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a4d327f47b1a31b306fb4fe6432af1d886c835cdbd980c7c4555beb092685804"}, + {file = "pyinstrument-4.3.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27a5cbd7dcf0b768f1a8faf82787e9fa358f278f810218633be7e0be1fa4b565"}, + {file = "pyinstrument-4.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b7ee1f54109a2da76170cedfcae32c2281179108c02d227250f3b00298b90625"}, + {file = "pyinstrument-4.3.0-cp310-cp310-win32.whl", hash = "sha256:251b394c03e9de88707e5deffbb62a5792374396db18608e595b6de85ab2345a"}, + {file = "pyinstrument-4.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:1e33e506a671561c72074324eba62babca7392ab3ed244cec683687eb6984f41"}, + {file = "pyinstrument-4.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1a7da205c581a195ffb5c9d7fac2342e31fc47ae993eaa6161973ad87090c077"}, + {file = "pyinstrument-4.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a2a3e3e4db917a55b06b520fba940df073ed0b06d1e6ae002a6bf2962d400b1c"}, + {file = "pyinstrument-4.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11450181910c2c7aba2a2cf3f54168a7adc02357e0e75f1d22393294a54b69db"}, + {file = "pyinstrument-4.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ff8c38829e2fe0dba8cc02ec58927d8d2cffffb73ae27ab229bf0b687e59785"}, + {file = "pyinstrument-4.3.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9968ad846a359458ba8c04085180962c871d851c3a05647f152ff1c86029d54c"}, + {file = "pyinstrument-4.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e785fe9f35a5e782e692bb1eb9fe2a9a94f06f1b9e090402ebd9028ce2effce6"}, + {file = "pyinstrument-4.3.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c943f0a5eefeffe19a31c955b01e96d814b423bcdb739717a30c9de0e469793e"}, + {file = "pyinstrument-4.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9925dffaeb7cfc44c164169327c81c854f09478852be289d75790b2df35f02e0"}, + {file = "pyinstrument-4.3.0-cp311-cp311-win32.whl", hash = 
"sha256:17ef8c945725c36de6d6c04039ac6d2178985579b1aa1b185d7bda5dc3c36897"}, + {file = "pyinstrument-4.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:5e5664ad63bae9db89775933d7c4fbfc42c7820506ba86501688a4aade8dda9f"}, + {file = "pyinstrument-4.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3ebd661641237c2e7861e288bd2a144934bef06b10adfc9851ffd00d2e75af0e"}, + {file = "pyinstrument-4.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5d6967294b91eefdd94035c35981bbffbaa41c96155c3ed3b23788f878f5106"}, + {file = "pyinstrument-4.3.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8fea9898d52d16774b36c62ef100fef1c56da25bcf913e156a2d5c7bc1330cd9"}, + {file = "pyinstrument-4.3.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:372548770c558cd25b8174a06bbac01963dd9cb2479233ae590be61ab40ce0ad"}, + {file = "pyinstrument-4.3.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6ca5d57298307bc81713161b2603a8805bdea1eda93489698d2179a754476e8f"}, + {file = "pyinstrument-4.3.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0816507fc3284e93b258d8f773e56aea420834d1415f986a3e5482441f78d274"}, + {file = "pyinstrument-4.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a0330a6245f997277f93a1d189a16928408794a097d1266bb55128d9964f9071"}, + {file = "pyinstrument-4.3.0-cp37-cp37m-win32.whl", hash = "sha256:829676c816b2fc7475082ab495938ac64ba9e99a311246b5f197528f009657b9"}, + {file = "pyinstrument-4.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b923df701afd8f42cd8227c654c0c90bd69e5180207c4ea0368930d96e633ed9"}, + {file = "pyinstrument-4.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5e5e7fa5ba84cbef2ae1eed17fa3c242988e09922ba5a28fd3edb0e2c6493b61"}, + {file = "pyinstrument-4.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37b3661ae8113286d244595e467088b312285f43b035c07d5328767508b168e2"}, + {file = "pyinstrument-4.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378f126edb1c225016cb15eb775d0b9b330c549f62595c30b7298aaa0ee884c4"}, + {file = "pyinstrument-4.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb2aa31a68b695623860379ee6aa3242a803fe4bf81304faa70c238101a2b2ad"}, + {file = "pyinstrument-4.3.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997485418224a34af66f27b1bab364b846321632e0dad8e7b6ed434903bb4b0d"}, + {file = "pyinstrument-4.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:eb4db9e18e2e190e4e8833014e28bfaa847c27758d9a355255c92d481845aaaa"}, + {file = "pyinstrument-4.3.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:960c80e48137194f8758519bc5a8296a599994cc32303b765474a6015da6982d"}, + {file = "pyinstrument-4.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:12b553cf578fe1c6805529bf25c5e9c72877cb46de6c773c2d4f38626c956f9d"}, + {file = "pyinstrument-4.3.0-cp38-cp38-win32.whl", hash = "sha256:19ec8ffc2f8fe7466e97dc6e8aac6acc19fa51337432f317685a585694525927"}, + {file = "pyinstrument-4.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:4db0f0c2ce4616e7312c8eaa869ad377e98ab43f997c2e3fdf15d9ace1aba660"}, + {file = "pyinstrument-4.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7d6a06ab1f66d5e4d69804a8efa37e527648c5af9cdaea6a8eff8d6c8514788f"}, + {file = "pyinstrument-4.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:128d470628bc3aca03aeb9f3baa63f245f5f53a8fb1f43c6b1e004486c5ceeca"}, + {file = "pyinstrument-4.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5d2f0de75234a2dbf46ddad9aa02787f3d0cee24bc62095d113660490c9a0da"}, + {file = "pyinstrument-4.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8e739d8261c478936d39c4f15317ddc10516e03484a5ddd77787486dc4c102e"}, + {file = "pyinstrument-4.3.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83cba3ab3da1756820d4fa6ff68b3b0e9728560a6e12b2fc709a9b0b0963648d"}, + {file = "pyinstrument-4.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8bb06a4650c7b84c91f227b67f898a5a514eae02df3ce0727e119e87863d9731"}, + {file = "pyinstrument-4.3.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f900c409f8eb5b4446dd6daecfd4f544a0e1ffcc727515e0085028dea9c64f0c"}, + {file = "pyinstrument-4.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3a3c3e6161795277544a30b3c298afda2eb58f13be288fcde1f933e6ba603583"}, + {file = "pyinstrument-4.3.0-cp39-cp39-win32.whl", hash = "sha256:12ca63435e71191c1a135ad28b0480ab0060268b3b16a6f20686919b492d83fd"}, + {file = "pyinstrument-4.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:99c8dca1dddad2cebee0e96a7d285cad0f28341084c5616bde3fac87f0f38929"}, + {file = "pyinstrument-4.3.0.tar.gz", hash = "sha256:575c5e2581839a21800194842291e1348edecc6f4c67f8efeef8356588ea4c25"}, ] -pynvml = [ - {file = "pynvml-11.4.1-py3-none-any.whl", hash = "sha256:d27be542cd9d06558de18e2deffc8022ccd7355bc7382255d477038e7e424c6c"}, - {file = "pynvml-11.4.1.tar.gz", hash = "sha256:b2e4a33b80569d093b513f5804db0c7f40cfc86f15a013ae7a8e99c5e175d5dd"}, +pyparsing = [ + {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, + {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, ] -rich = [ - {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"}, - {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"}, +pytest = [ + {file = "pytest-7.1.3-py3-none-any.whl", hash = "sha256:1377bda3466d70b55e3f5cecfa55bb7cfcf219c7964629b967c37cf0bda818b7"}, + {file = "pytest-7.1.3.tar.gz", hash = "sha256:4f365fec2dff9c1162f834d9f18af1ba13062db0c708bf7b946f8a5c76180c39"}, ] -scalene = [ - {file = "scalene-1.5.13-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:e69f7ee1749423f4dcfc8414219d10676f26691a28a90b7fbebd31141e20ed3a"}, - {file = "scalene-1.5.13-cp310-cp310-manylinux_2_24_x86_64.whl", hash = "sha256:2118b41159d7306d410bb1ed73c80bffe4b718bcc4bf4a723e2cf0badb31bf6a"}, - {file = "scalene-1.5.13-cp310-cp310-win_amd64.whl", hash = "sha256:321a94aff17523702157f9632d115fed62215ed64c607c528b97bab063f63b2f"}, - {file = "scalene-1.5.13-cp37-cp37m-macosx_10_15_universal2.whl", hash = "sha256:25938a6c7e8fa14898e69354515fb41e072aa06d83be4060e2dbd0192d87c2af"}, - {file = "scalene-1.5.13-cp37-cp37m-manylinux_2_24_x86_64.whl", hash = "sha256:e3dad1b5ce6d1329c9e2b595d55d50540ae8f1e8670e47509db593d1cb1b1809"}, - {file = "scalene-1.5.13-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:8959143c587807f2a341876efb40b770b95e3df94d5470ea0f2c5615a7ff186d"}, - {file = "scalene-1.5.13-cp38-cp38-manylinux_2_24_x86_64.whl", hash = 
"sha256:37a340c73d93b08c502ad3e553f434fd91bb4913955979b9386935ded137782b"}, - {file = "scalene-1.5.13-cp38-cp38-win_amd64.whl", hash = "sha256:8c51819f858f852106ad3b5da17d08575344dd45fa35a4311342dc6bcfb202ca"}, - {file = "scalene-1.5.13-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:b99252336e6351c24b5410145f4729b3f9012d9833160f1ac5221ee19f6d2eff"}, - {file = "scalene-1.5.13-cp39-cp39-manylinux_2_24_x86_64.whl", hash = "sha256:fd4ec03141df635b6cc5cca9f801015e2fe6c71530727024febe28d4c749f502"}, - {file = "scalene-1.5.13-cp39-cp39-win_amd64.whl", hash = "sha256:0ff439b5fdd696303c4d1e7293d5689c9e579da4aa48940149db3f979578fdba"}, - {file = "scalene-1.5.13.tar.gz", hash = "sha256:0077d517249cfa6ad0953d85c9656bdaca57b863159bbb78969987d9bdbb539c"}, +pytest-cov = [ + {file = "pytest-cov-4.0.0.tar.gz", hash = "sha256:996b79efde6433cdbd0088872dbc5fb3ed7fe1578b68cdbba634f14bb8dd0470"}, + {file = "pytest_cov-4.0.0-py3-none-any.whl", hash = "sha256:2feb1b751d66a8bd934e5edfa2e961d11309dc37b73b0eabe73b5945fee20f6b"}, ] snakeviz = [ {file = "snakeviz-2.1.1-py2.py3-none-any.whl", hash = "sha256:931142dc927101c9a4b6e89bc0577ff1a3d1886b483a04e6af70c31d2c3dce19"}, @@ -287,6 +467,10 @@ super-py = [ {file = "super-py-0.4.4.tar.gz", hash = "sha256:25480917cd2767ac22fe1471535bb3807274ffbcb56e3998de91abb6cedb0700"}, {file = "super_py-0.4.4-py3-none-any.whl", hash = "sha256:76887ef89e550fc5ba2697e439ceb402478e9c1afd44f1c64040c304cd99d32a"}, ] +tomli = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] tornado = [ {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"}, {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"}, diff --git a/profiler.py b/profiler.py new file mode 100644 index 0000000..9acca39 --- /dev/null +++ b/profiler.py @@ -0,0 +1,14 @@ +import dictdatabase as DDB +from pyinstrument import profiler + + + +DDB.config.storage_directory = "./test_db/production_database" +DDB.config.use_orjson = True + +p = profiler.Profiler(interval=0.00001) +with p: + with DDB.at("tasks").session(key="fM44", as_PathDict=True) as (session, task): + task["jay"] = lambda x: (x or 0) + 1 + session.write() +p.open_in_browser() diff --git a/pyproject.toml b/pyproject.toml index eb90d0b..e692da0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "dictdatabase" -version = "1.3.0" +version = "1.4.0" repository = "https://github.com/mkrd/DictDataBase" description = "Easy-to-use database using dicts" authors = ["Marcel KrΓΆker "] @@ -16,16 +16,17 @@ classifiers=[ ] [tool.poetry.dependencies] -python = "^3.10" +python = "^3.8" path-dict = "^1.2.6" Brotli = "^1.0.9" +orjson = "^3.8.0" [tool.poetry.group.dev.dependencies] snakeviz = "^2.1.1" pycodestyle = "^2.9.1" -orjson = "^3.8.0" super-py = "^0.4.2" -scalene = "^1.5.13" +pyinstrument = "^4.3.0" +pytest-cov = "^4.0.0" [build-system] requires = ["poetry-core"] diff --git a/test_profiler.py b/test_profiler.py deleted file mode 100644 index a5c837a..0000000 --- a/test_profiler.py +++ /dev/null @@ -1,76 +0,0 @@ -import cProfile -import dictdatabase as DDB -import subprocess -import orjson -from scalene import scalene_profiler - - -DDB.config.storage_directory = 
"./test_db/production_database" - - - - - - -def orjson_decode(data_str, select_key: str = "fM44"): - - return orjson.loads(data_str) - # select_start = f'"{select_key}": ' - # key_index = data_str.find(select_start) - # print(key_index) - - # in_str = False - # in_lst = 0 - # in_dct = 0 - # for i in range(key_index + len(select_start), len(data_str)): - # if data_str[i] == '"' and data_str[i-1] != "\\": - # in_str = not in_str - # continue - # if in_str: - # continue - # if data_str[i] == "[": - # in_lst += 1 - # elif data_str[i] == "]": - # in_lst -= 1 - # elif data_str[i] == "{": - # in_dct += 1 - # elif data_str[i] == "}": - # in_dct -= 1 - # if in_lst == 0 and in_dct == 0: - # i += 1 - # if data_str[i] == ",": - # i += 1 - # break - - # load_str = data_str[key_index + len(select_start):i] - # if load_str[-1] == ",": - # load_str = load_str[:-1] - # print(load_str) - # loaded = orjson.loads(load_str) - # return loaded - - - - - -def orjson_encode(data_dict): - return orjson.dumps( - data_dict, - option=orjson.OPT_SORT_KEYS | orjson.OPT_INDENT_2, - ) - - -DDB.config.custom_json_encoder = orjson_encode -DDB.config.custom_json_decoder = orjson_decode - - -scalene_profiler.start() - - -with DDB.session("tasks") as (session, tasks): - print("sess", len(tasks)) - session.write() -scalene_profiler.stop() - -# command = "poetry run snakeviz test.prof" -# subprocess.call(command.split()) diff --git a/testing/__init__.py b/testing/__init__.py deleted file mode 100644 index aa7b7c3..0000000 --- a/testing/__init__.py +++ /dev/null @@ -1,89 +0,0 @@ -import dictdatabase as DDB -import super_py as sp -import shutil -import os -import orjson - - -def orjson_decode(data_str): - return orjson.loads(data_str) - - -def orjson_encode(data_dict): - return orjson.dumps( - data_dict, - option=orjson.OPT_SORT_KEYS | orjson.OPT_INDENT_2, - ) - - -def config_orjson(): - DDB.config.custom_json_encoder = orjson_encode - DDB.config.custom_json_decoder = orjson_decode - - - -def make_test_dir(): - DDB.config.storage_directory = ".ddb_storage_testing" - os.makedirs(DDB.config.storage_directory, exist_ok=True) - - - - - -def teardown(): - shutil.rmtree(".ddb_storage_testing") - DDB.config.custom_json_encoder = None - DDB.config.custom_json_decoder = None - - - -def setup(): - make_test_dir() - DDB.config.pretty_json_files = False - DDB.config.use_compression = False - - -def setup_pretty(): - setup() - DDB.config.pretty_json_files = True - - -def setup_compress(): - setup() - DDB.config.use_compression = True - - -def setup_orjson(): - setup() - config_orjson() - - -def setup_pretty_orjson(): - setup_pretty() - config_orjson() - - -def setup_compress_orjson(): - setup_compress() - config_orjson() - - - - -test_scenes = { - "(πŸ”΄ pretty) (πŸ”΄ compression) (πŸ”΄ orjson)": sp.test(setup, teardown, raise_assertion_errors=True), - "(🟒 pretty) (πŸ”΄ compression) (πŸ”΄ orjson)": sp.test(setup_pretty, teardown, raise_assertion_errors=True), - "(πŸ”΄ pretty) (🟒 compression) (πŸ”΄ orjson)": sp.test(setup_compress, teardown, raise_assertion_errors=True), - "(πŸ”΄ pretty) (πŸ”΄ compression) (🟒 orjson)": sp.test(setup_orjson, teardown, raise_assertion_errors=True), - "(🟒 pretty) (πŸ”΄ compression) (🟒 orjson)": sp.test(setup_pretty_orjson, teardown, raise_assertion_errors=True), - "(πŸ”΄ pretty) (🟒 compression) (🟒 orjson)": sp.test(setup_compress_orjson, teardown, raise_assertion_errors=True), -} - -test_scenes_no_teardown = { - "(πŸ”΄ pretty) (πŸ”΄ compression) (πŸ”΄ orjson)": sp.test(setup, raise_assertion_errors=True), - "(🟒 
pretty) (πŸ”΄ compression) (πŸ”΄ orjson)": sp.test(setup_pretty, raise_assertion_errors=True), - "(πŸ”΄ pretty) (🟒 compression) (πŸ”΄ orjson)": sp.test(setup_compress, raise_assertion_errors=True), - "(πŸ”΄ pretty) (πŸ”΄ compression) (🟒 orjson)": sp.test(setup_orjson, raise_assertion_errors=True), - "(🟒 pretty) (πŸ”΄ compression) (🟒 orjson)": sp.test(setup_pretty_orjson, raise_assertion_errors=True), - "(πŸ”΄ pretty) (🟒 compression) (🟒 orjson)": sp.test(setup_compress_orjson, raise_assertion_errors=True), -} diff --git a/testing/run_tests.py b/testing/run_tests.py deleted file mode 100644 index c24836e..0000000 --- a/testing/run_tests.py +++ /dev/null @@ -1,32 +0,0 @@ -from inspect import getmembers, isfunction -from testing import test_scenes, test_scenes_no_teardown, teardown - - -print("🚧 Test create") -from testing import test_create -for scene, run_scene in test_scenes.items(): - print(scene) - for _, fn in getmembers(test_create, isfunction): - run_scene(fn) - - -print("🚧 Test exceptions") -from testing import test_excepts -for scene, run_scene in test_scenes.items(): - print(scene) - [run_scene(f) for _, f in getmembers(test_excepts, isfunction)] - - -print("🚧 Test read and write") -from testing import test_read_write -for scene, run_scene in test_scenes.items(): - print(scene) - [run_scene(f) for _, f in getmembers(test_read_write, isfunction)] - - -print("🚧 Test big db") -from testing import test_big_db -for scene, run_scene in test_scenes_no_teardown.items(): - print(scene) - [run_scene(f) for _, f in getmembers(test_big_db, isfunction)] - teardown() diff --git a/testing/test_big_db.py b/testing/test_big_db.py deleted file mode 100644 index 9f05394..0000000 --- a/testing/test_big_db.py +++ /dev/null @@ -1,24 +0,0 @@ - -import dictdatabase as DDB - - -def a_create(): - d = {"key1": "val1", "key2": 2, "key3": [1, "2", [3, 3]]} - for i in range(4): - d = {f"key{i}{j}": d for j in range(20)} - # About 22MB - DDB.create("_test_big_db", db=d) - - -def b_read(): - d = DDB.read("_test_big_db") - - -def c_open_session(): - with DDB.session("_test_big_db") as (session, d): - pass - - -def d_open_session_and_write(): - with DDB.session("_test_big_db") as (session, d): - session.write() diff --git a/testing/test_create.py b/testing/test_create.py deleted file mode 100644 index 92edfc2..0000000 --- a/testing/test_create.py +++ /dev/null @@ -1,35 +0,0 @@ -import contextlib -import dictdatabase as DDB - - -def file_creation(): - n = DDB.read("Non_existent") - assert n is None - - DDB.create("db1") - db = DDB.read("db1") - assert db == {} - - with DDB.session("db1", as_PathDict=True) as (session, d): - d["a", "b", "c"] = "dee" - assert d["a", "b", "c"] == "dee" - session.write() - assert DDB.read("db1") == {"a": {"b": {"c": "dee"}}} - - -def nested_file_creation(): - n = DDB.read("blobbles/bla/blub") - assert n is None - DDB.create("blobbles/osna/efforts", db={"val": [1, 2]}) - assert DDB.read("blobbles/osna/efforts") == {"val": [1, 2]} - - -def create_same_file_twice(): - # Check that creating the same file twice must raise an error - with contextlib.suppress(FileExistsError): - DDB.create("db1") - DDB.create("db1") - assert False - # Check that creating the same file twice with force_overwrite=True works - DDB.create("db2", force_overwrite=True) - DDB.create("db2", force_overwrite=True) diff --git a/testing/test_excepts.py b/testing/test_excepts.py deleted file mode 100644 index b6e84cc..0000000 --- a/testing/test_excepts.py +++ /dev/null @@ -1,34 +0,0 @@ -import dictdatabase as DDB - - -def 
except_during_open_session(): - d = {"test": "value"} - DDB.create("test", db=d) - try: - with DDB.session("test", as_PathDict=True) as (session, test): - raise Exception("Any Exception") - except Exception: - pass - - -def except_on_save_unserializable(): - try: - d = {"test": "value"} - DDB.create("test", db=d) - with DDB.session("test", as_PathDict=True) as (session, test): - test["test"] = {"key": set([1, 2, 2])} - session.write() - assert False - except TypeError: - assert True - - -def except_on_session_in_session(): - d = {"test": "value"} - DDB.create("test", db=d) - try: - with DDB.session("test", as_PathDict=True) as (session, test): - with DDB.session("test", as_PathDict=True) as (session2, test2): - assert False - except RuntimeError: - assert True diff --git a/testing/test_read_write.py b/testing/test_read_write.py deleted file mode 100644 index ff468ad..0000000 --- a/testing/test_read_write.py +++ /dev/null @@ -1,103 +0,0 @@ -import dictdatabase as DDB - - -def read_non_existent_json(): - DDB.config.use_compression = False - d = DDB.read("nonexistent") - assert d is None - - -def read_non_existent_ddb(): - DDB.config.use_compression = True - d = DDB.read("nonexistent") - assert d is None - - -def open_non_existent_json(): - DDB.config.use_compression = False - try: - with DDB.session("nonexistent", as_PathDict=True) as (session, d): - assert False - except Exception: - assert True - - -def open_non_existent_ddb(): - DDB.config.use_compression = True - try: - with DDB.session("nonexistent", as_PathDict=True) as (session, d): - assert False - except Exception: - assert True - - -def write_json_read_json(): - DDB.config.use_compression = False - d = {"test": "value"} - DDB.create("test", db=d) - dd = DDB.read("test") - assert d == dd - - -def write_ddb_read_ddb(): - DDB.config.use_compression = True - d = {"test": "value"} - DDB.create("test", db=d) - dd = DDB.read("test") - assert d == dd - - -def write_json_read_ddb(): - DDB.config.use_compression = False - d = {"test": "value"} - DDB.create("test", db=d) - DDB.config.use_compression = True - dd = DDB.read("test") - assert d == dd - - -def write_ddb_read_json(): - DDB.config.use_compression = True - d = {"test": "value"} - DDB.create("test", db=d) - DDB.config.use_compression = False - dd = DDB.read("test") - assert d == dd - - -def write_json_write_json(): - DDB.config.use_compression = False - d = {"test": "value"} - DDB.create("test", db=d) - with DDB.session("test", as_PathDict=True) as (session, dd): - assert d == dd.dict - session.write() - - -def write_ddb_write_ddb(): - DDB.config.use_compression = True - d = {"test": "value"} - DDB.create("test", db=d) - with DDB.session("test", as_PathDict=True) as (session, dd): - assert d == dd.dict - session.write() - - -def write_ddb_write_json(): - DDB.config.use_compression = True - d = {"test": "value"} - DDB.create("test", db=d) - DDB.config.use_compression = False - with DDB.session("test", as_PathDict=True) as (session, dd): - assert d == dd.dict - session.write() - - -def write_json_write_ddb(): - DDB.config.use_compression = False - d = {"test": "value"} - DDB.create("test", db=d) - DDB.config.use_compression = True - with DDB.session("test", as_PathDict=True) as (session, dd): - assert d == dd.dict - session.write() diff --git a/testing/utils.py b/testing/utils.py deleted file mode 100644 index b7b931e..0000000 --- a/testing/utils.py +++ /dev/null @@ -1,30 +0,0 @@ -import dictdatabase as DDB -import json -import os - -def incr_db(n, tables): - for _ in range(n): - for t 
in range(tables): - d = DDB.read(f"incr{t}") - with DDB.session(f"incr{t}", as_PathDict=True) as (session, d): - d["counter"] = lambda x: (x or 0) + 1 - session.write() - return True - - - - -def make_table(recursion_depth=4, keys_per_level=20): - - incr = {"counter": 0} - d = {"key1": "val1", "key2": 2, "key3": [1, "2", [3, 3]]} - for i in range(recursion_depth): - d = {f"key{i}{j}": d for j in range(keys_per_level)} - incr["big"] = d - return incr - - -def get_tasks_json(): - print(os.getcwd()) - with open("test_db/production_database/tasks.json", "rb") as f: - return json.load(f) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/benchmark/big_db.py b/tests/benchmark/big_db.py new file mode 100644 index 0000000..c7d7da5 --- /dev/null +++ b/tests/benchmark/big_db.py @@ -0,0 +1,38 @@ +import dictdatabase as DDB +from pyinstrument import profiler + + +def a_create(): + d = {"key1": "val1", "key2": 2, "key3": [1, "2", [3, 3]]} + for i in range(4): + d = {f"key{i}{j}": d for j in range(20)} + # About 22MB + DDB.at("_test_big_db").create(d, force_overwrite=True) + + +def b_read(): + d = DDB.at("_test_big_db").read() + + +def c_session(): + with DDB.at("_test_big_db").session() as (session, d): + session.write() + + + + + +p = profiler.Profiler(interval=0.00001) +p.start() + +for f in [a_create, b_read, c_session]: + for uc in [False, True]: + for uo in [False, True]: + for sc in [False, True]: + for id in [None, 0, 2, "\t"]: + # TODO: missing configs + f() + +p.stop() + +p.open_in_browser(timeline=True) diff --git a/testing/benchmark/run_parallel.py b/tests/benchmark/run_parallel.py similarity index 72% rename from testing/benchmark/run_parallel.py rename to tests/benchmark/run_parallel.py index 4d5f92b..0b9a0d9 100644 --- a/testing/benchmark/run_parallel.py +++ b/tests/benchmark/run_parallel.py @@ -2,23 +2,23 @@ import super_py as sp import time import os -from testing import utils, test_scenes, orjson_encode, orjson_decode +from tests import utils, test_scenes, orjson_encode, orjson_decode import orjson from multiprocessing import Pool import cProfile import subprocess -def incr_db(n, tables, ddb_sd, ddb_pj, ddb_uc): +def incr_db(n, tables, sd, uc, uo, id, sk): print("parallel_runner incr_db") - DDB.config.storage_directory = ddb_sd - DDB.config.pretty_json_files = ddb_pj - DDB.config.use_compression = ddb_uc - DDB.config.custom_json_encoder = orjson_encode - DDB.config.custom_json_decoder = orjson_decode + DDB.config.storage_directory = sd + DDB.config.use_compression = uc + DDB.config.use_orjson = uo + DDB.config.indent = id + DDB.config.sort_keys = sk for _ in range(n): for t in range(tables): - with DDB.session(f"incr{t}", as_PathDict=True) as (session, d): + with DDB.at(f"incr{t}").session(as_PathDict=True) as (session, d): d["counter"] = lambda x: (x or 0) + 1 session.write() return True @@ -27,7 +27,7 @@ def incr_db(n, tables, ddb_sd, ddb_pj, ddb_uc): def parallel_stress(tables=1, processes=8, per_process=8): # Create Tables for t in range(tables): - DDB.create(f"incr{t}", db=utils.get_tasks_json()) + DDB.at(f"incr{t}").create(utils.get_tasks_json()) # Execute process pool running incr_db as the target task t1 = time.time() @@ -40,8 +40,10 @@ def parallel_stress(tables=1, processes=8, per_process=8): per_process, tables, DDB.config.storage_directory, - DDB.config.pretty_json_files, DDB.config.use_compression, + DDB.config.use_orjson, + DDB.config.indent, + DDB.config.sort_keys, )) pool.close() pool.join() @@ -52,9 +54,9 
@@ def parallel_stress(tables=1, processes=8, per_process=8): print(f"{ops = }, {ops_sec = }, {tables = }, {processes = }") for t in range(tables): - db = DDB.read(f"incr{t}") + db = DDB.at(f"incr{t}").read() print(f"βœ… {db['counter'] = } == {per_process * processes = }") - assert DDB.read(f"incr{t}")["counter"] == processes * per_process + assert DDB.at(f"incr{t}").read()["counter"] == processes * per_process if __name__ == "__main__": diff --git a/testing/benchmark/run_threaded.py b/tests/benchmark/run_threaded.py similarity index 85% rename from testing/benchmark/run_threaded.py rename to tests/benchmark/run_threaded.py index 168b658..a2465dd 100644 --- a/testing/benchmark/run_threaded.py +++ b/tests/benchmark/run_threaded.py @@ -3,7 +3,7 @@ import time import cProfile import subprocess -from testing import test_scenes, utils +from tests import test_scenes, utils @@ -11,9 +11,9 @@ def increment_counters(n, tables): for _ in range(n): for t in range(tables): # Perform a useless read operation - d = DDB.read(f"incr{t}") + d = DDB.at(f"incr{t}").read() # Perform a counter increment - with DDB.session(f"incr{t}", as_PathDict=True) as (session, d): + with DDB.at(f"incr{t}").session(as_PathDict=True) as (session, d): d["counter"] = lambda x: (x or 0) + 1 session.write() return True @@ -22,7 +22,7 @@ def increment_counters(n, tables): def test_stress_threaded(tables=1, threads=4, per_thread=3): # Create tables for t in range(tables): - DDB.create(f"incr{t}", db=utils.make_table()) + DDB.at(f"incr{t}").create(utils.make_table()) # Create tasks for concurrent execution tasks = [(increment_counters, (per_thread, tables)) for _ in range(threads)] @@ -40,7 +40,7 @@ def test_stress_threaded(tables=1, threads=4, per_thread=3): # Check correctness of results assert results == [True] * threads for t in range(tables): - db = DDB.read(f"incr{t}") + db = DDB.at(f"incr{t}").read() assert db["counter"] == threads * per_thread print(f"βœ… {db['counter'] = } == {per_thread * threads = }") diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..3bebf5e --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,33 @@ +import dictdatabase as DDB +import pytest +import shutil + +@pytest.fixture(scope="session") +def env(request): + dir = "./.ddb_pytest_storage" + DDB.config.storage_directory = dir + #request.addfinalizer(lambda: shutil.rmtree(dir)) + + + +@pytest.fixture(params=[True, False]) +def use_compression(request): + DDB.config.use_compression = request.param + + + +@pytest.fixture(params=[False, True]) +def use_orjson(request): + DDB.config.use_orjson = request.param + + + +@pytest.fixture(params=[False, True]) +def sort_keys(request): + DDB.config.sort_keys = request.param + + + +@pytest.fixture(params=[None, 0, 2, "\t"]) +def indent(request): + DDB.config.indent = request.param diff --git a/tests/test_create.py b/tests/test_create.py new file mode 100644 index 0000000..08480dd --- /dev/null +++ b/tests/test_create.py @@ -0,0 +1,46 @@ +import dictdatabase as DDB +import pytest +import json + +from tests.utils import make_complex_nested_random_dict + + +def test_create(env, use_compression, use_orjson, sort_keys, indent): + DDB.at("test_create").create(force_overwrite=True) + db = DDB.at("test_create").read() + assert db == {} + + with DDB.at("test_create").session(as_PathDict=True) as (session, d): + d["a", "b", "c"] = "d" + session.write() + assert DDB.at("test_create").read() == {"a": {"b": {"c": "d"}}} + + +def test_create_edge_cases(env, use_compression, use_orjson, sort_keys, 
indent): + cases = [-2, 0.0, "", "x", [], {}, True] + + for i, c in enumerate(cases): + DDB.at(f"tcec{i}").create(c, force_overwrite=True) + assert DDB.at(f"tcec{i}").read() == c + + with pytest.raises(TypeError): + DDB.at("tcec99").create(object(), force_overwrite=True) + + +def test_nested_file_creation(env, use_compression, use_orjson, sort_keys, indent): + n = DDB.at("nested/file/nonexistent").read() + assert n is None + db = make_complex_nested_random_dict(12, 6) + DDB.at("nested/file/creation/test").create(db, force_overwrite=True) + assert DDB.at("nested/file/creation/test").read() == db + + +def test_create_same_file_twice(env, use_compression, use_orjson, sort_keys, indent): + name = "test_create_same_file_twice" + # Check that creating the same file twice must raise an error + with pytest.raises(FileExistsError): + DDB.at(name).create(force_overwrite=True) + DDB.at(name).create() + # Check that creating the same file twice with force_overwrite=True works + DDB.at(f"{name}2").create(force_overwrite=True) + DDB.at(f"{name}2").create(force_overwrite=True) diff --git a/tests/test_excepts.py b/tests/test_excepts.py new file mode 100644 index 0000000..7c16d91 --- /dev/null +++ b/tests/test_excepts.py @@ -0,0 +1,32 @@ +import dictdatabase as DDB +import pytest + + +def test_except_during_open_session(env, use_compression, use_orjson, sort_keys, indent): + name = "test_except_during_open_session" + d = {"test": "value"} + DDB.at(name).create(d, force_overwrite=True) + with pytest.raises(RuntimeError): + with DDB.at(name).session() as (session, test): + raise RuntimeError("Any Exception") + + + +def test_except_on_save_unserializable(env, use_compression, use_orjson, sort_keys, indent): + name = "test_except_on_save_unserializable" + with pytest.raises(TypeError): + d = {"test": "value"} + DDB.at(name).create(d, force_overwrite=True) + with DDB.at(name).session(as_PathDict=True) as (session, test): + test["test"] = {"key": {1, 2}} + session.write() + + +def test_except_on_session_in_session(env, use_compression, use_orjson, sort_keys, indent): + name = "test_except_on_session_in_session" + d = {"test": "value"} + DDB.at(name).create(d, force_overwrite=True) + with pytest.raises(RuntimeError): + with DDB.at(name).session(as_PathDict=True) as (session, test): + with DDB.at(name).session(as_PathDict=True) as (session2, test2): + pass diff --git a/tests/test_io_safe.py b/tests/test_io_safe.py new file mode 100644 index 0000000..1efc0f4 --- /dev/null +++ b/tests/test_io_safe.py @@ -0,0 +1,28 @@ +import dictdatabase as DDB +from dictdatabase import io_safe +import pytest +import json + + +def test_read(): + # Elicit read error + DDB.config.use_orjson = True + with pytest.raises(json.decoder.JSONDecodeError): + with open(f"{DDB.config.storage_directory}/corrupted_json.json", "w") as f: + f.write("This is not JSON") + io_safe.read("corrupted_json") + + +def test_partial_read(): + assert io_safe.partial_read("nonexistent", key="none") is None + + +def test_write(): + with pytest.raises(TypeError): + io_safe.write("nonexistent", lambda x: x) + + +def test_delete(): + DDB.at("to_be_deleted").create() + DDB.at("to_be_deleted").delete() + assert DDB.at("to_be_deleted").read() is None diff --git a/tests/test_partial.py b/tests/test_partial.py new file mode 100644 index 0000000..e955d75 --- /dev/null +++ b/tests/test_partial.py @@ -0,0 +1,45 @@ +import dictdatabase as DDB +import json +import pytest + + +def test_subread(env, use_compression, use_orjson, sort_keys, indent): + name = "test_subread" + j = 
{
+		"a": "Hello{}",
+		"b": [0, 1],
+		"c": {"d": "e"},
+	}
+
+	DDB.at(name).create(j, force_overwrite=True)
+	with pytest.raises(json.decoder.JSONDecodeError):
+		DDB.at(name).read("a") == "Hello}{"
+
+	with pytest.raises(KeyError):
+		DDB.at(name).read("f")
+
+	assert DDB.at(name).read("b") == [0, 1]
+	assert DDB.at(name).read("c") == {"d": "e"}
+
+	j2 = {"a": {"b": "c"}, "b": {"d": "e"}}
+	DDB.at("test_subread2").create(j2, force_overwrite=True)
+	assert DDB.at("test_subread2").read("b") == {"d": "e"}
+
+
+def test_subwrite(env, use_compression, use_orjson, sort_keys, indent):
+	name = "test_subwrite"
+	j = {
+		"b": {"0": 1},
+		"c": {"d": "e"},
+	}
+
+	DDB.at(name).create(j, force_overwrite=True)
+	with DDB.at(name).session("c", as_PathDict=True) as (session, task):
+		task["f"] = lambda x: (x or 0) + 5
+		session.write()
+	assert DDB.at(name).read("c") == {"d": "e", "f": 5}
+
+	with DDB.at(name).session("b", as_PathDict=True) as (session, task):
+		task["f"] = lambda x: (x or 0) + 2
+		session.write()
+	assert DDB.at(name).read("f") == 2
diff --git a/tests/test_read.py b/tests/test_read.py
new file mode 100644
index 0000000..9c8c025
--- /dev/null
+++ b/tests/test_read.py
@@ -0,0 +1,44 @@
+import dictdatabase as DDB
+import pytest
+from tests.utils import make_complex_nested_random_dict
+
+
+def test_non_existent(env, use_compression, use_orjson, sort_keys, indent):
+	d = DDB.at("nonexistent").read()
+	assert d is None
+
+
+def test_create_and_read(env, use_compression, use_orjson, sort_keys, indent):
+	name = "test_create_and_read"
+	d = make_complex_nested_random_dict(12, 6)
+	DDB.at(name).create(d, force_overwrite=True)
+	dd = DDB.at(name).read()
+	assert d == dd
+
+
+def test_read_compression_switching(env, use_orjson, sort_keys, indent):
+	name = "test_read_compression_switching"
+	DDB.config.use_compression = False
+	d = make_complex_nested_random_dict(12, 6)
+	DDB.at(name).create(d, force_overwrite=True)
+	DDB.config.use_compression = True
+	dd = DDB.at(name).read()
+	assert d == dd
+	DDB.at(name).create(d, force_overwrite=True)
+	DDB.config.use_compression = False
+	dd = DDB.at(name).read()
+	assert d == dd
+
+
+def test_multiread(env, use_compression, use_orjson, sort_keys, indent):
+	dl = []
+	for i in range(3):
+		dl += [make_complex_nested_random_dict(12, 6)]
+		DDB.at(f"test_multiread/d{i}").create(dl[-1], force_overwrite=True)
+
+	mr = DDB.at("test_multiread/*").read()
+	mr2 = DDB.at("test_multiread", "*").read()
+	assert mr == mr2
+	mr = {k.replace("test_multiread/", ""): v for k, v in mr.items()}
+	assert mr == {f"d{i}": dl[i] for i in range(3)}
diff --git a/tests/test_write.py b/tests/test_write.py
new file mode 100644
index 0000000..e007de3
--- /dev/null
+++ b/tests/test_write.py
@@ -0,0 +1,41 @@
+import dictdatabase as DDB
+import pytest
+from tests.utils import make_complex_nested_random_dict
+
+
+def test_non_existent_session(env):
+	name = "test_non_existent_session"
+	with pytest.raises(FileNotFoundError):
+		with DDB.at(name).session() as (session, d):
+			session.write()
+
+
+def test_write(env, use_compression, use_orjson, sort_keys, indent):
+	name = "test_write"
+	d = make_complex_nested_random_dict(12, 6)
+	DDB.at(name).create(d, force_overwrite=True)
+	with DDB.at(name).session() as (session, dd):
+		assert d == dd
+		session.write()
+
+
+def test_write_compression_switching(env, use_orjson, sort_keys, indent):
+	name = "test_write_compression_switching"
+	DDB.config.use_compression = False
+	d = make_complex_nested_random_dict(12, 6)
+	DDB.at(name).create(d, force_overwrite=True)
+	with DDB.at(name).session() as (session, dd):
+		assert d == dd
+		session.write()
+	assert DDB.at(name).read() == d
+	DDB.config.use_compression = True
+	with DDB.at(name).session() as (session, dd):
+		assert d == dd
+		session.write()
+	assert DDB.at(name).read() == d
+	DDB.config.use_compression = False
+	with DDB.at(name).session() as (session, dd):
+		assert d == dd
+		session.write()
+	assert DDB.at(name).read() == d
diff --git a/tests/utils.py b/tests/utils.py
new file mode 100644
index 0000000..ac6a76c
--- /dev/null
+++ b/tests/utils.py
@@ -0,0 +1,87 @@
+import dictdatabase as DDB
+import random
+import string
+import json
+import os
+
+
+def incr_db(n, tables):
+	for _ in range(n):
+		for t in range(tables):
+			d = DDB.at(f"incr{t}").read()
+			with DDB.at(f"incr{t}").session(as_PathDict=True) as (session, d):
+				d["counter"] = lambda x: (x or 0) + 1
+				session.write()
+	return True
+
+
+
+def make_table(recursion_depth=4, keys_per_level=20):
+	d = {"key1": "val1", "key2": 2, "key3": [1, "2", [3, 3]]}
+	for i in range(recursion_depth):
+		d = {f"key{i}{j}": d for j in range(keys_per_level)}
+	return {"counter": 0, "big": d}
+
+
+def get_tasks_json():
+	print(os.getcwd())
+	with open("test_db/production_database/tasks.json", "rb") as f:
+		return json.load(f)
+
+
+def make_complex_nested_random_dict(max_width, max_depth):
+
+	def random_string(choices, md):
+		length = random.randint(0, max_width)
+		letters = string.ascii_letters + "".join(["\\", " ", "🚀", '"'])
+		return "".join(random.choices(letters, k=length))
+
+	def random_int(choices, md):
+		return random.randint(-1000, 1000)
+
+	def random_float(choices, md):
+		return random.uniform(-1000, 1000)
+
+	def random_bool(choices, md):
+		return random.choice([True, False])
+
+	def random_none(choices, md):
+		return None
+
+	def random_list(choices, md):
+		if md == 0:
+			return []
+		res = []
+		for _ in range(random.randint(0, max_width)):
+			v = random.choice(choices)(choices, md - 1)
+			res += [v]
+		return res
+
+	def random_dict(choices, md):
+		if md == 0:
+			return {}
+		res = {}
+		for _ in range(random.randint(0, max_width)):
+			k = random_string(choices, md)
+			v = random.choice(choices)(choices, md - 1)
+			res[k] = v
+		return res
+
+	return random_dict([
+		random_string,
+		random_int,
+		random_float,
+		random_bool,
+		random_none,
+		random_list,
+		random_dict
+	], max_depth)
+
+
+if __name__ == "__main__":
+	# Manual sanity check, kept out of import time so test runs are not affected:
+	# generate one random test dict and pretty-print it.
+	d = make_complex_nested_random_dict(10, 5)
+	print(json.dumps(d, indent=2))
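
For reference, the sketch below shows how a new test file could plug into the pytest fixtures introduced in tests/conftest.py and the partial-access API exercised in tests/test_partial.py. It is illustrative only and not part of the diff: the test name, database name, and sample data are made up, and the file is assumed to live under tests/ so that the conftest.py fixtures are collected.

```python
import dictdatabase as DDB


# Requesting the conftest.py fixtures runs this test once per configuration:
# compression on/off, orjson on/off, sorted keys on/off, and each indent value.
def test_partial_counter_sketch(env, use_compression, use_orjson, sort_keys, indent):
	name = "partial_counter_sketch"  # hypothetical database name
	DDB.at(name).create({"counters": {"a": 1}, "meta": {"v": 1}}, force_overwrite=True)

	# Partial session: only the "counters" subtree is handed over (as a PathDict
	# here) and written back by session.write().
	with DDB.at(name).session("counters", as_PathDict=True) as (session, counters):
		counters["b"] = lambda x: (x or 0) + 2
		session.write()

	# A partial read of the same key returns the updated plain dict.
	assert DDB.at(name).read("counters") == {"a": 1, "b": 2}
```

Any test written against these fixtures is exercised once per configuration combination, which is how the pytest suite replaces the config "test scenes" previously assembled in testing/__init__.py.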