Update bugout token #160

GitHub Actions / JUnit Test Report failed May 2, 2024 in 0s

22452 tests run, 11810 passed, 10632 skipped, 10 failed.

Annotations

Check failure on line 948 in deeplake/enterprise/test_pytorch.py

test_pytorch.test_pytorch_data_decode

UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd9 in position 2: invalid continuation byte
Raw output
local_auth_ds = Dataset(path='./hub_pytest/test_pytorch/test_pytorch_data_decode', tensors=['generic', 'text', 'json', 'list', 'class_label', 'image'])
cat_path = '/home/runner/work/deeplake/deeplake/deeplake/tests/dummy_data/images/cat.jpeg'

    @requires_libdeeplake
    @requires_torch
    @pytest.mark.flaky
    @pytest.mark.slow
    def test_pytorch_data_decode(local_auth_ds, cat_path):
        with local_auth_ds as ds:
            ds.create_tensor("generic")
            for i in range(10):
                ds.generic.append(i)
            ds.create_tensor("text", htype="text")
            for i in range(10):
                ds.text.append(f"hello {i}")
            ds.create_tensor("json", htype="json")
            for i in range(10):
                ds.json.append({"x": i})
            ds.create_tensor("list", htype="list")
            for i in range(10):
                ds.list.append([i, i + 1])
            ds.create_tensor("class_label", htype="class_label")
            animals = [
                "cat",
                "dog",
                "bird",
                "fish",
                "horse",
                "cow",
                "pig",
                "sheep",
                "goat",
                "chicken",
            ]
            ds.class_label.extend(animals)
            ds.create_tensor("image", htype="image", sample_compression="jpeg")
            for i in range(10):
                ds.image.append(deeplake.read(cat_path))
    
        decode_method = {tensor: "data" for tensor in list(ds.tensors.keys())}
        ptds = (
            ds.dataloader()
            .transform(identity)
            .pytorch(decode_method=decode_method, collate_fn=identity_collate)
        )
>       for i, batch in enumerate(ptds):

deeplake/enterprise/test_pytorch.py:948: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
deeplake/enterprise/dataloader.py:881: in __next__
    return next(self._iterator)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/indra/pytorch/loader.py:155: in __next__
    return next(self._iterator)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/indra/pytorch/single_process_iterator.py:80: in __next__
    return self.get_data()
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/indra/pytorch/single_process_iterator.py:117: in get_data
    batch = self._next_data()
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/indra/pytorch/single_process_iterator.py:104: in _next_data
    sample[tensor] = bytes_to_text(sample[tensor], "list")
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

buffer = b'T=\xd9xJ\x7f', htype = 'list'

    def bytes_to_text(buffer, htype):
        buffer = bytes(buffer)
        if htype == "json":
            arr = np.empty(1, dtype=object)
            arr[0] = json.loads(bytes.decode(buffer), cls=HubJsonDecoder)
            return arr
        elif htype in ("list", "tag"):
>           lst = json.loads(bytes.decode(buffer), cls=HubJsonDecoder)
E           UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd9 in position 2: invalid continuation byte

deeplake/core/serialize.py:484: UnicodeDecodeError
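
The failing branch is the `htype == "list"` path of `bytes_to_text`: the buffer `b'T=\xd9xJ\x7f'` is not valid UTF-8, so decoding raises before `json.loads` ever runs. A minimal standalone sketch of that behaviour using only the standard library (the byte strings are copied from the traceback above; everything else is illustrative):

    import json

    good = b'[0, 1]'        # what a "list" htype chunk is expected to hold: UTF-8 JSON
    bad = b'T=\xd9xJ\x7f'   # the buffer reported in the failure above

    print(json.loads(good.decode("utf-8")))  # [0, 1]

    try:
        json.loads(bad.decode("utf-8"))
    except UnicodeDecodeError as exc:
        # 0xd9 at position 2 cannot be decoded as UTF-8 here, so the decode
        # step raises -- matching the UnicodeDecodeError in the report.
        print(exc)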

Check failure on line 189 in deeplake/core/vectorstore/deep_memory/test_deepmemory.py

test_deepmemory.test_deepmemory_evaluate

AssertionError: assert {'recall@1': ...@3': 0.3, ...} == {'recall@1': ...@3': 0.6, ...}
  Differing items:
  {'recall@5': 0.3} != {'recall@5': 0.6}
  {'recall@3': 0.3} != {'recall@3': 0.6}
  {'recall@50': 0.5} != {'recall@50': 0.7}
  {'recall@100': 0.7} != {'recall@100': 0.9}
  {'recall@10': 0.4} != {'recall@10': 0.6}
  {'recall@1': 0.3} != {'recall@1': 0.4}
  Full diff:
    {
  +  'recall@1': 0.3,
  -  'recall@1': 0.4,
  +  'recall@10': 0.4,
  ?           +
  -  'recall@10': 0.6,
  -  'recall@100': 0.9,
  ?                  ^
  +  'recall@100': 0.7,
  ?                  ^
  -  'recall@3': 0.6,
  ?                ^
  +  'recall@3': 0.3,
  ?                ^
  -  'recall@5': 0.6,
  ?                ^
  +  'recall@5': 0.3,
  ?                ^
  -  'recall@50': 0.7,
  ?                 ^
  +  'recall@50': 0.5,
  ?                 ^
    }
Raw output
corpus_query_relevances_copy = ('hub://testingacc2/tmp811c_test_deepmemory_test_deepmemory_evaluate', ['0-dimensional biomaterials lack inductive pro...5107', 1]], [['32587939', 1]], ...], 'hub://testingacc2/tmp811c_test_deepmemory_test_deepmemory_evaluate_eval_queries')
questions_embeddings_and_relevances = (array([[-0.01518817,  0.02033963, -0.01228631, ..., -0.00286692,
        -0.0079668 , -0.00414979],
       [-0.003503...A treatment decreases endoplasmic reticulum stress in response to general endoplasmic reticulum stress markers.', ...])
hub_cloud_dev_token = 'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJpZCI6InRlc3RpbmdhY2MyIiwiYXBpX2tleSI6IjU4Y0tLb1p6UE1BbThPU2RpbTRiZ2tBekhWekt1VUE3MFJpNTNyZUpKRTJuaiJ9.'

    @pytest.mark.slow
    @pytest.mark.timeout(600)
    @pytest.mark.skipif(sys.platform == "win32", reason="Does not run on Windows")
    @requires_libdeeplake
    def test_deepmemory_evaluate(
        corpus_query_relevances_copy,
        questions_embeddings_and_relevances,
        hub_cloud_dev_token,
    ):
        corpus, _, _, query_path = corpus_query_relevances_copy
        (
            questions_embeddings,
            question_relevances,
            queries,
        ) = questions_embeddings_and_relevances
    
        db = VectorStore(
            corpus,
            runtime={"tensor_db": True},
            token=hub_cloud_dev_token,
        )
    
        # when qvs_params is wrong:
        with pytest.raises(ValueError):
            db.deep_memory.evaluate(
                queries=queries,
                embedding=questions_embeddings,
                relevance=question_relevances,
                qvs_params={
                    "log_queries": True,
                    "branch_name": "wrong_branch",
                },
            )
    
        # embedding_function is not provided in the constructor or in the eval method
        with pytest.raises(ValueError):
            db.deep_memory.evaluate(
                queries=queries,
                relevance=question_relevances,
                qvs_params={
                    "log_queries": True,
                    "branch_name": "wrong_branch",
                },
            )
    
        recall = db.deep_memory.evaluate(
            queries=queries,
            embedding=questions_embeddings,
            relevance=question_relevances,
            qvs_params={
                "branch": "queries",
            },
        )
    
>       assert recall["without model"] == {
            "recall@1": 0.4,
            "recall@3": 0.6,
            "recall@5": 0.6,
            "recall@10": 0.6,
            "recall@50": 0.7,
            "recall@100": 0.9,
        }
E       AssertionError: assert {'recall@1': ...@3': 0.3, ...} == {'recall@1': ...@3': 0.6, ...}
E         Differing items:
E         {'recall@5': 0.3} != {'recall@5': 0.6}
E         {'recall@3': 0.3} != {'recall@3': 0.6}
E         {'recall@50': 0.5} != {'recall@50': 0.7}
E         {'recall@100': 0.7} != {'recall@100': 0.9}
E         {'recall@10': 0.4} != {'recall@10': 0.6}
E         {'recall@1': 0.3} != {'recall@1': 0.4}
E         Full diff:
E           {
E         +  'recall@1': 0.3,
E         -  'recall@1': 0.4,
E         +  'recall@10': 0.4,
E         ?           +
E         -  'recall@10': 0.6,
E         -  'recall@100': 0.9,
E         ?                  ^
E         +  'recall@100': 0.7,
E         ?                  ^
E         -  'recall@3': 0.6,
E         ?                ^
E         +  'recall@3': 0.3,
E         ?                ^
E         -  'recall@5': 0.6,
E         ?                ^
E         +  'recall@5': 0.3,
E         ?                ^
E         -  'recall@50': 0.7,
E         ?                 ^
E         +  'recall@50': 0.5,
E         ?                 ^
E           }

deeplake/core/vectorstore/deep_memory/test_deepmemory.py:189: AssertionError
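
The assertion pins exact recall@k values, and the observed numbers drift between runs. As a reference for reading those numbers, a hedged sketch of how recall@k is conventionally computed over a batch of queries (the helper is illustrative only, not the `deep_memory.evaluate` implementation):

    def recall_at_k(retrieved, relevant, k):
        """Fraction of queries whose top-k retrieved ids contain at least one relevant id."""
        hits = sum(
            1
            for query_ids, rel_ids in zip(retrieved, relevant)
            if set(query_ids[:k]) & set(rel_ids)
        )
        return hits / len(retrieved)

    # Example: 2 of 4 queries have a relevant id in their top-1 results -> recall@1 == 0.5
    retrieved = [["a", "b"], ["c", "d"], ["e", "f"], ["g", "h"]]
    relevant = [["a"], ["x"], ["e"], ["y"]]
    print(recall_at_k(retrieved, relevant, k=1))  # 0.5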

Check failure on line 683 in deeplake/api/tests/test_link.py

test_link.test_creds

deeplake.util.exceptions.DatasetCorruptError: Exception occurred (see Traceback). The dataset maybe corrupted. Try using `reset=True` to reset HEAD changes and load the previous commit.
Raw output
path = 'hub://testingacc2/tmpb5e9_test_link_test_creds', runtime = None
read_only = None, overwrite = False, public = False, memory_cache_size = 2000
local_cache_size = 0, creds = {}
token = 'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJpZCI6InRlc3RpbmdhY2MyIiwiYXBpX2tleSI6IjU4Y0tLb1p6UE1BbThPU2RpbTRiZ2tBekhWekt1VUE3MFJpNTNyZUpKRTJuaiJ9.'
org_id = None, verbose = True, access_method = 'stream', unlink = False
reset = False, check_integrity = False, lock_enabled = True, lock_timeout = 0
index_params = None, indra = False

    @staticmethod
    @spinner
    def init(
        path: Union[str, pathlib.Path],
        runtime: Optional[Dict] = None,
        read_only: Optional[bool] = None,
        overwrite: bool = False,
        public: bool = False,
        memory_cache_size: int = DEFAULT_MEMORY_CACHE_SIZE,
        local_cache_size: int = DEFAULT_LOCAL_CACHE_SIZE,
        creds: Optional[Union[Dict, str]] = None,
        token: Optional[str] = None,
        org_id: Optional[str] = None,
        verbose: bool = True,
        access_method: str = "stream",
        unlink: bool = False,
        reset: bool = False,
        check_integrity: Optional[bool] = False,
        lock_enabled: Optional[bool] = True,
        lock_timeout: Optional[int] = 0,
        index_params: Optional[Dict[str, Union[int, str]]] = None,
        indra: bool = USE_INDRA,
    ):
        """Returns a :class:`~deeplake.core.dataset.Dataset` object referencing either a new or existing dataset.
    
        Examples:
    
            >>> ds = deeplake.dataset("hub://username/dataset")
            >>> ds = deeplake.dataset("s3://mybucket/my_dataset")
            >>> ds = deeplake.dataset("./datasets/my_dataset", overwrite=True)
    
            Loading to a specific version:
    
            >>> ds = deeplake.dataset("hub://username/dataset@new_branch")
            >>> ds = deeplake.dataset("hub://username/dataset@3e49cded62b6b335c74ff07e97f8451a37aca7b2")
    
            >>> my_commit_id = "3e49cded62b6b335c74ff07e97f8451a37aca7b2"
            >>> ds = deeplake.dataset(f"hub://username/dataset@{my_commit_id}")
    
        Args:
            path (str, pathlib.Path): - The full path to the dataset. Can be:
                - a Deep Lake cloud path of the form ``hub://username/datasetname``. To write to Deep Lake cloud datasets, ensure that you are authenticated to Deep Lake (pass in a token using the 'token' parameter).
                - an s3 path of the form ``s3://bucketname/path/to/dataset``. Credentials are required in either the environment or passed to the creds argument.
                - a local file system path of the form ``./path/to/dataset`` or ``~/path/to/dataset`` or ``path/to/dataset``.
                - a memory path of the form ``mem://path/to/dataset`` which doesn't save the dataset but keeps it in memory instead. Should be used only for testing as it does not persist.
                - Loading to a specific version:
    
                    - You can also specify a ``commit_id`` or ``branch`` to load the dataset to that version directly by using the ``@`` symbol.
                    - The path will then be of the form ``hub://username/dataset@{branch}`` or ``hub://username/dataset@{commit_id}``.
                    - See examples above.
            runtime (dict): Parameters for Activeloop DB Engine. Only applicable for hub:// paths.
            read_only (bool, optional): Opens dataset in read only mode if this is passed as ``True``. Defaults to ``False``.
                Datasets stored on Deep Lake cloud that your account does not have write access to will automatically open in read mode.
            overwrite (bool): If set to ``True`` this overwrites the dataset if it already exists. Defaults to ``False``.
            public (bool): Defines if the dataset will have public access. Applicable only if Deep Lake cloud storage is used and a new Dataset is being created. Defaults to ``True``.
            memory_cache_size (int): The size of the memory cache to be used in MB.
            local_cache_size (int): The size of the local filesystem cache to be used in MB.
            creds (dict, str, optional): The string ``ENV`` or a dictionary containing credentials used to access the dataset at the path.
                - If 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token' are present, these take precedence over credentials present in the environment or in credentials file. Currently only works with s3 paths.
                - It supports 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token', 'endpoint_url', 'aws_region', 'profile_name' as keys.
                - If 'ENV' is passed, credentials are fetched from the environment variables. This is also the case when creds is not passed for cloud datasets. For datasets connected to hub cloud, specifying 'ENV' will override the credentials fetched from Activeloop and use local ones.
            token (str, optional): Activeloop token, used for fetching credentials to the dataset at path if it is a Deep Lake dataset. This is optional, tokens are normally autogenerated.
            org_id (str, Optional): Organization id to be used for enabling high-performance features. Only applicable for local datasets.
            verbose (bool): If ``True``, logs will be printed. Defaults to ``True``.
            access_method (str): The access method to use for the dataset. Can be:
    
                    - 'stream'
    
                        - Streams the data from the dataset i.e. only fetches data when required. This is the default value.
    
                    - 'download'
    
                        - Downloads the data to the local filesystem to the path specified in environment variable ``DEEPLAKE_DOWNLOAD_PATH``.
                          This will overwrite ``DEEPLAKE_DOWNLOAD_PATH``.
                        - Raises an exception if ``DEEPLAKE_DOWNLOAD_PATH`` environment variable is not set or if the dataset does not exist.
                        - The 'download' access method can be modified to specify num_workers and/or scheduler.
                          For example: 'download:2:processed' will use 2 workers and use processed scheduler, while 'download:3' will use 3 workers and
                          default scheduler (threaded), and 'download:processed' will use a single worker and use processed scheduler.
    
                    - 'local'
    
                        - Downloads the dataset if it doesn't already exist, otherwise loads from local storage.
                        - Raises an exception if ``DEEPLAKE_DOWNLOAD_PATH`` environment variable is not set.
                        - The 'local' access method can be modified to specify num_workers and/or scheduler to be used in case dataset needs to be downloaded.
                          If dataset needs to be downloaded, 'local:2:processed' will use 2 workers and use processed scheduler, while 'local:3' will use 3 workers
                          and default scheduler (threaded), and 'local:processed' will use a single worker and use processed scheduler.
            unlink (bool): Downloads linked samples if set to ``True``. Only applicable if ``access_method`` is ``download`` or ``local``. Defaults to ``False``.
            reset (bool): If the specified dataset cannot be loaded due to a corrupted HEAD state of the branch being loaded,
                          setting ``reset=True`` will reset HEAD changes and load the previous version.
            check_integrity (bool, Optional): Performs an integrity check by default (None) if the dataset has 20 or fewer tensors.
                                              Set to ``True`` to force integrity check, ``False`` to skip integrity check.
            lock_timeout (int): Number of seconds to wait before throwing a LockException. If None, wait indefinitely
            lock_enabled (bool): If true, the dataset manages a write lock. NOTE: Only set to False if you are managing concurrent access externally
            index_params (dict, optional): Index parameters used while creating the vector store, passed down to the dataset.
            indra (bool): Flag indicating whether indra api should be used to create the dataset. Defaults to false
    
        ..
            # noqa: DAR101
    
        Returns:
            Dataset: Dataset created using the arguments provided.
    
        Raises:
            AgreementError: When agreement is rejected
            UserNotLoggedInException: When user is not authenticated
            InvalidTokenException: If the specified token is invalid
            TokenPermissionError: When there are permission or other errors related to token
            CheckoutError: If version address specified in the path cannot be found
            DatasetCorruptError: If loading the dataset failed due to corruption and ``reset`` is not ``True``
            ValueError: If a version is specified in the path when creating a dataset, or if the org id is provided but the dataset is not local
            ReadOnlyModeError: If reset is attempted in read-only mode
            LockedException: When attempting to open a dataset for writing when it is locked by another machine
            DatasetHandlerError: If overwriting the dataset fails
            Exception: Re-raises caught exception if reset cannot fix the issue
    
        Danger:
            Setting ``overwrite`` to ``True`` will delete all of your data if it exists! Be very careful when setting this parameter.
    
        Warning:
            Setting ``access_method`` to download will overwrite the local copy of the dataset if it was previously downloaded.
    
        Note:
            Any changes made to the dataset in download / local mode will only be made to the local copy and will not be reflected in the original dataset.
        """
        _check_indra_and_read_only_flags(indra, read_only)
        access_method, num_workers, scheduler = parse_access_method(access_method)
        check_access_method(access_method, overwrite, unlink)
    
        path, address = process_dataset_path(path)
        verify_dataset_name(path)
    
        if org_id is not None and get_path_type(path) != "local":
            raise ValueError("org_id parameter can only be used with local datasets")
    
        if creds is None:
            creds = {}
    
        db_engine = parse_runtime_parameters(path, runtime)["tensor_db"]
    
        try:
            storage, cache_chain = get_storage_and_cache_chain(
                path=path,
                db_engine=db_engine,
                read_only=read_only,
                creds=creds,
                token=token,
                memory_cache_size=memory_cache_size,
                local_cache_size=local_cache_size,
                indra=indra,
            )
    
            feature_report_path(path, "dataset", {"Overwrite": overwrite}, token=token)
        except Exception as e:
            if isinstance(e, UserNotLoggedInException):
                raise UserNotLoggedInException from None
            raise
        ds_exists = dataset_exists(cache_chain)
    
        if ds_exists:
            if overwrite:
                if not dataset._allow_delete(cache_chain):
                    raise DatasetHandlerError(
                        "Dataset overwrite failed. The dataset is marked as allow_delete=false. To allow overwrite, you must first run `allow_delete = True` on the dataset."
                    )
    
                try:
                    cache_chain.clear()
                except Exception as e:
                    raise DatasetHandlerError(
                        "Dataset overwrite failed. See traceback for more information."
                    ) from e
                create = True
            else:
                create = False
        else:
            create = True
    
        if create and address:
            raise ValueError(
                "deeplake.dataset does not accept version address when writing a dataset."
            )
    
        dataset_kwargs: Dict[str, Union[None, str, bool, int, Dict]] = {
            "path": path,
            "read_only": read_only,
            "token": token,
            "org_id": org_id,
            "verbose": verbose,
            "lock_timeout": lock_timeout,
            "lock_enabled": lock_enabled,
            "index_params": index_params,
        }
    
        if access_method == "stream":
            dataset_kwargs.update(
                {
                    "address": address,
                    "storage": cache_chain,
                    "public": public,
                }
            )
        else:
            dataset_kwargs.update(
                {
                    "access_method": access_method,
                    "memory_cache_size": memory_cache_size,
                    "local_cache_size": local_cache_size,
                    "creds": creds,
                    "ds_exists": ds_exists,
                    "num_workers": num_workers,
                    "scheduler": scheduler,
                    "reset": reset,
                    "unlink": unlink,
                }
            )
    
        try:
>           return dataset._load(
                dataset_kwargs, access_method, create, check_integrity=check_integrity
            )

deeplake\api\dataset.py:312: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
deeplake\api\dataset.py:778: in _load
    ret = dataset_factory(**dataset_kwargs)
deeplake\core\dataset\__init__.py:23: in dataset_factory
    ds = clz(path=path, *args, **kwargs)
deeplake\core\dataset\dataset.py:294: in __init__
    self._first_load_init()
deeplake\core\dataset\deeplake_cloud_dataset.py:41: in _first_load_init
    self.link_creds.populate_all_managed_creds(verbose=self.verbose and verbose)
deeplake\core\link_creds.py:281: in populate_all_managed_creds
    self.populate_single_managed_creds(creds_key, verbose=verbose)
deeplake\core\link_creds.py:292: in populate_single_managed_creds
    creds = self.fetch_managed_creds(creds_key, verbose=verbose)
deeplake\core\link_creds.py:296: in fetch_managed_creds
    creds = self.client.get_managed_creds(self.org_id, creds_key)
deeplake\client\client.py:277: in get_managed_creds
    resp = self.request(
deeplake\client\client.py:148: in request
    check_response_status(response)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

response = <Response [504]>

    def check_response_status(response: requests.Response):
        """Check response status and throw corresponding exception on failure."""
        code = response.status_code
        if code >= 200 and code < 300:
            return
    
        try:
            message = response.json()["description"]
        except Exception:
            message = " "
    
        if code == 400:
            raise BadRequestException(message)
        elif response.status_code == 401:
            raise AuthenticationException
        elif response.status_code == 403:
            raise AuthorizationException(message, response=response)
        elif response.status_code == 404:
            if message != " ":
                raise ResourceNotFoundException(message)
            raise ResourceNotFoundException
        elif response.status_code == 422:
            raise UnprocessableEntityException(message)
        elif response.status_code == 423:
            raise LockedException
        elif response.status_code == 429:
            raise OverLimitException
        elif response.status_code == 502:
            raise BadGatewayException
        elif response.status_code == 504:
>           raise GatewayTimeoutException
E           deeplake.util.exceptions.GatewayTimeoutException: Activeloop server took too long to respond.

deeplake\client\utils.py:74: GatewayTimeoutException

The above exception was the direct cause of the following exception:

hub_cloud_ds_generator = <function hub_cloud_ds_generator.<locals>.generate_hub_cloud_ds at 0x000001D193208550>
cat_path = 'D:\\a\\deeplake\\deeplake\\deeplake\\tests\\dummy_data\\images\\cat.jpeg'

    @pytest.mark.slow
    def test_creds(hub_cloud_ds_generator, cat_path):
        creds_key = "ENV"
        ds = hub_cloud_ds_generator()
        ds.add_creds_key(creds_key)
        ds.populate_creds(creds_key, from_environment=True)
        with ds:
            tensor = ds.create_tensor("abc", "link[image]", sample_compression="jpeg")
            seq_tensor = ds.create_tensor(
                "xyz", "sequence[link[image]]", sample_compression="jpeg"
            )
            tensor.append(deeplake.link(cat_path, creds_key))
            seq_tensor.append(
                [deeplake.link(cat_path, creds_key), deeplake.link(cat_path, creds_key)]
            )
    
        assert tensor[0].creds_key() == creds_key
        assert seq_tensor[0].creds_key() == [creds_key, creds_key]
        ds.add_creds_key("aws_creds", True)
        assert ds.get_managed_creds_keys() == {"aws_creds"}
        assert set(ds.get_creds_keys()) == {"aws_creds", "ENV"}
        ds.update_creds_key("aws_creds", managed=True)
>       ds = hub_cloud_ds_generator()

deeplake\api\tests\test_link.py:683: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
deeplake\tests\dataset_fixtures.py:153: in generate_hub_cloud_ds
    return deeplake.dataset(hub_cloud_path, token=hub_cloud_dev_token, **kwargs)
deeplake\util\spinner.py:151: in inner
    return func(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

path = 'hub://testingacc2/tmpb5e9_test_link_test_creds', runtime = None
read_only = None, overwrite = False, public = False, memory_cache_size = 2000
local_cache_size = 0, creds = {}
token = 'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJpZCI6InRlc3RpbmdhY2MyIiwiYXBpX2tleSI6IjU4Y0tLb1p6UE1BbThPU2RpbTRiZ2tBekhWekt1VUE3MFJpNTNyZUpKRTJuaiJ9.'
org_id = None, verbose = True, access_method = 'stream', unlink = False
reset = False, check_integrity = False, lock_enabled = True, lock_timeout = 0
index_params = None, indra = False

    @staticmethod
    @spinner
    def init(
        path: Union[str, pathlib.Path],
        runtime: Optional[Dict] = None,
        read_only: Optional[bool] = None,
        overwrite: bool = False,
        public: bool = False,
        memory_cache_size: int = DEFAULT_MEMORY_CACHE_SIZE,
        local_cache_size: int = DEFAULT_LOCAL_CACHE_SIZE,
        creds: Optional[Union[Dict, str]] = None,
        token: Optional[str] = None,
        org_id: Optional[str] = None,
        verbose: bool = True,
        access_method: str = "stream",
        unlink: bool = False,
        reset: bool = False,
        check_integrity: Optional[bool] = False,
        lock_enabled: Optional[bool] = True,
        lock_timeout: Optional[int] = 0,
        index_params: Optional[Dict[str, Union[int, str]]] = None,
        indra: bool = USE_INDRA,
    ):
        """Returns a :class:`~deeplake.core.dataset.Dataset` object referencing either a new or existing dataset.
    
        Examples:
    
            >>> ds = deeplake.dataset("hub://username/dataset")
            >>> ds = deeplake.dataset("s3://mybucket/my_dataset")
            >>> ds = deeplake.dataset("./datasets/my_dataset", overwrite=True)
    
            Loading to a specific version:
    
            >>> ds = deeplake.dataset("hub://username/dataset@new_branch")
            >>> ds = deeplake.dataset("hub://username/dataset@3e49cded62b6b335c74ff07e97f8451a37aca7b2")
    
            >>> my_commit_id = "3e49cded62b6b335c74ff07e97f8451a37aca7b2"
            >>> ds = deeplake.dataset(f"hub://username/dataset@{my_commit_id}")
    
        Args:
            path (str, pathlib.Path): - The full path to the dataset. Can be:
                - a Deep Lake cloud path of the form ``hub://username/datasetname``. To write to Deep Lake cloud datasets, ensure that you are authenticated to Deep Lake (pass in a token using the 'token' parameter).
                - an s3 path of the form ``s3://bucketname/path/to/dataset``. Credentials are required in either the environment or passed to the creds argument.
                - a local file system path of the form ``./path/to/dataset`` or ``~/path/to/dataset`` or ``path/to/dataset``.
                - a memory path of the form ``mem://path/to/dataset`` which doesn't save the dataset but keeps it in memory instead. Should be used only for testing as it does not persist.
                - Loading to a specific version:
    
                    - You can also specify a ``commit_id`` or ``branch`` to load the dataset to that version directly by using the ``@`` symbol.
                    - The path will then be of the form ``hub://username/dataset@{branch}`` or ``hub://username/dataset@{commit_id}``.
                    - See examples above.
            runtime (dict): Parameters for Activeloop DB Engine. Only applicable for hub:// paths.
            read_only (bool, optional): Opens dataset in read only mode if this is passed as ``True``. Defaults to ``False``.
                Datasets stored on Deep Lake cloud that your account does not have write access to will automatically open in read mode.
            overwrite (bool): If set to ``True`` this overwrites the dataset if it already exists. Defaults to ``False``.
            public (bool): Defines if the dataset will have public access. Applicable only if Deep Lake cloud storage is used and a new Dataset is being created. Defaults to ``True``.
            memory_cache_size (int): The size of the memory cache to be used in MB.
            local_cache_size (int): The size of the local filesystem cache to be used in MB.
            creds (dict, str, optional): The string ``ENV`` or a dictionary containing credentials used to access the dataset at the path.
                - If 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token' are present, these take precedence over credentials present in the environment or in credentials file. Currently only works with s3 paths.
                - It supports 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token', 'endpoint_url', 'aws_region', 'profile_name' as keys.
                - If 'ENV' is passed, credentials are fetched from the environment variables. This is also the case when creds is not passed for cloud datasets. For datasets connected to hub cloud, specifying 'ENV' will override the credentials fetched from Activeloop and use local ones.
            token (str, optional): Activeloop token, used for fetching credentials to the dataset at path if it is a Deep Lake dataset. This is optional, tokens are normally autogenerated.
            org_id (str, Optional): Organization id to be used for enabling high-performance features. Only applicable for local datasets.
            verbose (bool): If ``True``, logs will be printed. Defaults to ``True``.
            access_method (str): The access method to use for the dataset. Can be:
    
                    - 'stream'
    
                        - Streams the data from the dataset i.e. only fetches data when required. This is the default value.
    
                    - 'download'
    
                        - Downloads the data to the local filesystem to the path specified in environment variable ``DEEPLAKE_DOWNLOAD_PATH``.
                          This will overwrite ``DEEPLAKE_DOWNLOAD_PATH``.
                        - Raises an exception if ``DEEPLAKE_DOWNLOAD_PATH`` environment variable is not set or if the dataset does not exist.
                        - The 'download' access method can be modified to specify num_workers and/or scheduler.
                          For example: 'download:2:processed' will use 2 workers and use processed scheduler, while 'download:3' will use 3 workers and
                          default scheduler (threaded), and 'download:processed' will use a single worker and use processed scheduler.
    
                    - 'local'
    
                        - Downloads the dataset if it doesn't already exist, otherwise loads from local storage.
                        - Raises an exception if ``DEEPLAKE_DOWNLOAD_PATH`` environment variable is not set.
                        - The 'local' access method can be modified to specify num_workers and/or scheduler to be used in case dataset needs to be downloaded.
                          If dataset needs to be downloaded, 'local:2:processed' will use 2 workers and use processed scheduler, while 'local:3' will use 3 workers
                          and default scheduler (threaded), and 'local:processed' will use a single worker and use processed scheduler.
            unlink (bool): Downloads linked samples if set to ``True``. Only applicable if ``access_method`` is ``download`` or ``local``. Defaults to ``False``.
            reset (bool): If the specified dataset cannot be loaded due to a corrupted HEAD state of the branch being loaded,
                          setting ``reset=True`` will reset HEAD changes and load the previous version.
            check_integrity (bool, Optional): Performs an integrity check by default (None) if the dataset has 20 or fewer tensors.
                                              Set to ``True`` to force integrity check, ``False`` to skip integrity check.
            lock_timeout (int): Number of seconds to wait before throwing a LockException. If None, wait indefinitely
            lock_enabled (bool): If true, the dataset manages a write lock. NOTE: Only set to False if you are managing concurrent access externally
            index_params (dict, optional): Index parameters used while creating the vector store, passed down to the dataset.
            indra (bool): Flag indicating whether indra api should be used to create the dataset. Defaults to false
    
        ..
            # noqa: DAR101
    
        Returns:
            Dataset: Dataset created using the arguments provided.
    
        Raises:
            AgreementError: When agreement is rejected
            UserNotLoggedInException: When user is not authenticated
            InvalidTokenException: If the specified token is invalid
            TokenPermissionError: When there are permission or other errors related to token
            CheckoutError: If version address specified in the path cannot be found
            DatasetCorruptError: If loading the dataset failed due to corruption and ``reset`` is not ``True``
            ValueError: If a version is specified in the path when creating a dataset, or if the org id is provided but the dataset is not local
            ReadOnlyModeError: If reset is attempted in read-only mode
            LockedException: When attempting to open a dataset for writing when it is locked by another machine
            DatasetHandlerError: If overwriting the dataset fails
            Exception: Re-raises caught exception if reset cannot fix the issue
    
        Danger:
            Setting ``overwrite`` to ``True`` will delete all of your data if it exists! Be very careful when setting this parameter.
    
        Warning:
            Setting ``access_method`` to download will overwrite the local copy of the dataset if it was previously downloaded.
    
        Note:
            Any changes made to the dataset in download / local mode will only be made to the local copy and will not be reflected in the original dataset.
        """
        _check_indra_and_read_only_flags(indra, read_only)
        access_method, num_workers, scheduler = parse_access_method(access_method)
        check_access_method(access_method, overwrite, unlink)
    
        path, address = process_dataset_path(path)
        verify_dataset_name(path)
    
        if org_id is not None and get_path_type(path) != "local":
            raise ValueError("org_id parameter can only be used with local datasets")
    
        if creds is None:
            creds = {}
    
        db_engine = parse_runtime_parameters(path, runtime)["tensor_db"]
    
        try:
            storage, cache_chain = get_storage_and_cache_chain(
                path=path,
                db_engine=db_engine,
                read_only=read_only,
                creds=creds,
                token=token,
                memory_cache_size=memory_cache_size,
                local_cache_size=local_cache_size,
                indra=indra,
            )
    
            feature_report_path(path, "dataset", {"Overwrite": overwrite}, token=token)
        except Exception as e:
            if isinstance(e, UserNotLoggedInException):
                raise UserNotLoggedInException from None
            raise
        ds_exists = dataset_exists(cache_chain)
    
        if ds_exists:
            if overwrite:
                if not dataset._allow_delete(cache_chain):
                    raise DatasetHandlerError(
                        "Dataset overwrite failed. The dataset is marked as allow_delete=false. To allow overwrite, you must first run `allow_delete = True` on the dataset."
                    )
    
                try:
                    cache_chain.clear()
                except Exception as e:
                    raise DatasetHandlerError(
                        "Dataset overwrite failed. See traceback for more information."
                    ) from e
                create = True
            else:
                create = False
        else:
            create = True
    
        if create and address:
            raise ValueError(
                "deeplake.dataset does not accept version address when writing a dataset."
            )
    
        dataset_kwargs: Dict[str, Union[None, str, bool, int, Dict]] = {
            "path": path,
            "read_only": read_only,
            "token": token,
            "org_id": org_id,
            "verbose": verbose,
            "lock_timeout": lock_timeout,
            "lock_enabled": lock_enabled,
            "index_params": index_params,
        }
    
        if access_method == "stream":
            dataset_kwargs.update(
                {
                    "address": address,
                    "storage": cache_chain,
                    "public": public,
                }
            )
        else:
            dataset_kwargs.update(
                {
                    "access_method": access_method,
                    "memory_cache_size": memory_cache_size,
                    "local_cache_size": local_cache_size,
                    "creds": creds,
                    "ds_exists": ds_exists,
                    "num_workers": num_workers,
                    "scheduler": scheduler,
                    "reset": reset,
                    "unlink": unlink,
                }
            )
    
        try:
            return dataset._load(
                dataset_kwargs, access_method, create, check_integrity=check_integrity
            )
        except (AgreementError, CheckoutError, LockedException) as e:
            raise e from None
        except Exception as e:
            if create:
                raise e
            if access_method == "stream":
                if not reset:
                    if isinstance(e, DatasetCorruptError):
                        raise DatasetCorruptError(
                            message=e.message,
                            action="Try using `reset=True` to reset HEAD changes and load the previous commit.",
                            cause=e.__cause__,
                        )
>                   raise DatasetCorruptError(
                        "Exception occurred (see Traceback). The dataset maybe corrupted. "
                        "Try using `reset=True` to reset HEAD changes and load the previous commit."
                    ) from e
E                   deeplake.util.exceptions.DatasetCorruptError: Exception occurred (see Traceback). The dataset maybe corrupted. Try using `reset=True` to reset HEAD changes and load the previous commit.

deeplake\api\dataset.py:328: DatasetCorruptError
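
The underlying cause here is a 504 from the Activeloop backend while fetching managed creds, which the loader then surfaces as a DatasetCorruptError. The message points at the `reset=True` escape hatch documented above; a hedged sketch of that retry (path and token are placeholders, and since the root cause was a transient gateway timeout rather than real corruption, simply re-running the same load may be just as appropriate):

    import deeplake
    from deeplake.util.exceptions import DatasetCorruptError

    path, token = "hub://org/dataset", "..."  # placeholders
    try:
        ds = deeplake.dataset(path, token=token)
    except DatasetCorruptError:
        # reset=True discards uncommitted HEAD changes on the branch and loads
        # the previous commit, as the error message above suggests.
        ds = deeplake.dataset(path, token=token, reset=True)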

Check failure on line 52 in deeplake/api/tests/test_views.py

test_views.test_view_token_only

deeplake.util.exceptions.BadRequestException: Invalid Request. One or more request parameters is incorrect.
object generator can't be used in 'await' expression
Raw output
hub_cloud_path = 'hub://testingacc2/tmpb5e9_test_views_test_view_token_only'
hub_cloud_dev_token = 'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJpZCI6InRlc3RpbmdhY2MyIiwiYXBpX2tleSI6IjU4Y0tLb1p6UE1BbThPU2RpbTRiZ2tBekhWekt1VUE3MFJpNTNyZUpKRTJuaiJ9.'
hub_cloud_dev_credentials = ('testingacc2', None)

    @pytest.mark.slow
    def test_view_token_only(
        hub_cloud_path, hub_cloud_dev_token, hub_cloud_dev_credentials
    ):
        ds = deeplake.empty(hub_cloud_path, token=hub_cloud_dev_token)
        with ds:
            populate(ds)
    
        ds = deeplake.load(hub_cloud_path, token=hub_cloud_dev_token)
        view = ds[50:100]
        view.save_view(id="50to100")
    
        ds = deeplake.load(hub_cloud_path, read_only=True, token=hub_cloud_dev_token)
        view = ds[25:100]
        view.save_view(id="25to100")
    
        ds = deeplake.load(hub_cloud_path, read_only=True, token=hub_cloud_dev_token)
    
        loaded = ds.load_view("50to100")
        np.testing.assert_array_equal(loaded.images.numpy(), ds[50:100].images.numpy())
        np.testing.assert_array_equal(loaded.labels.numpy(), ds[50:100].labels.numpy())
        assert loaded._vds.path == posixpath.join(hub_cloud_path, ".queries/50to100")
    
        loaded = ds.load_view("25to100")
        np.testing.assert_array_equal(loaded.images.numpy(), ds[25:100].images.numpy())
        np.testing.assert_array_equal(loaded.labels.numpy(), ds[25:100].labels.numpy())
        assert loaded._vds.path == posixpath.join(hub_cloud_path, ".queries/25to100")
    
        ds.delete_view("25to100")
>       deeplake.delete(hub_cloud_path, token=hub_cloud_dev_token)

deeplake\api\tests\test_views.py:52: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
deeplake\util\spinner.py:151: in inner
    return func(*args, **kwargs)
deeplake\api\dataset.py:905: in delete
    ds.delete(large_ok=large_ok)
deeplake\core\dataset\deeplake_cloud_dataset.py:246: in delete
    self.client.delete_dataset_entry(self.org_id, self.ds_name)
deeplake\client\client.py:306: in delete_dataset_entry
    self.request(
deeplake\client\client.py:148: in request
    check_response_status(response)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

response = <Response [400]>

    def check_response_status(response: requests.Response):
        """Check response status and throw corresponding exception on failure."""
        code = response.status_code
        if code >= 200 and code < 300:
            return
    
        try:
            message = response.json()["description"]
        except Exception:
            message = " "
    
        if code == 400:
>           raise BadRequestException(message)
E           deeplake.util.exceptions.BadRequestException: Invalid Request. One or more request parameters is incorrect.
E           object generator can't be used in 'await' expression

deeplake\client\utils.py:56: BadRequestException
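
The 400 comes from the backend while deleting the dataset entry ("object generator can't be used in 'await' expression" is a server-side message, not something raised by the test code). For reference, a hedged sketch of retrying the same `deeplake.delete` entry point seen in the traceback with the `large_ok`/`force` flags it accepts (path and token are placeholders; a persistent server-side 400 would still surface):

    import deeplake
    from deeplake.util.exceptions import BadRequestException

    path, token = "hub://org/dataset", "..."  # placeholders
    try:
        deeplake.delete(path, token=token)
    except BadRequestException:
        # large_ok/force only relax client-side checks, so a genuine backend
        # error will be re-raised on the retry.
        deeplake.delete(path, token=token, large_ok=True, force=True)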

Check failure on line 1063 in deeplake/core/vectorstore/test_deeplake_vectorstore.py

test_deeplake_vectorstore.test_update_embedding[embedding_fn3-hub_cloud_ds-None-None-None-None-vector_store_query-hub_cloud_dev_token]

deeplake.util.exceptions.BadRequestException: Invalid Request. One or more request parameters is incorrect.
object generator can't be used in 'await' expression
Raw output
ds = Dataset(path='hub://testingacc2/tmp5dc0_test_deeplake_vectorstore_test_update_embedding-embedding_fn3-hub_cloud_ds-None-None-None-None-vector_store_query-hub_cloud_dev_token-', tensors=[])
vector_store_hash_ids = None, vector_store_row_ids = None
vector_store_filters = None, vector_store_filter_udf = None
vector_store_query = "select * where metadata['a']==1"
init_embedding_function = <function embedding_fn3 at 0x7f3151230860>
hub_cloud_dev_token = 'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJpZCI6InRlc3RpbmdhY2MyIiwiYXBpX2tleSI6IjU4Y0tLb1p6UE1BbThPU2RpbTRiZ2tBekhWekt1VUE3MFJpNTNyZUpKRTJuaiJ9.'

    @requires_libdeeplake
    @pytest.mark.parametrize(
        "ds, vector_store_hash_ids, vector_store_row_ids, vector_store_filters, vector_store_filter_udf, vector_store_query, hub_cloud_dev_token",
        [
            (
                "local_auth_ds",
                "vector_store_hash_ids",
                None,
                None,
                None,
                None,
                "hub_cloud_dev_token",
            ),
            (
                "local_auth_ds",
                None,
                "vector_store_row_ids",
                None,
                None,
                None,
                "hub_cloud_dev_token",
            ),
            (
                "local_auth_ds",
                None,
                None,
                None,
                "vector_store_filter_udf",
                None,
                "hub_cloud_dev_token",
            ),
            (
                "local_auth_ds",
                None,
                None,
                "vector_store_filters",
                None,
                None,
                "hub_cloud_dev_token",
            ),
            (
                "hub_cloud_ds",
                None,
                None,
                None,
                None,
                "vector_store_query",
                "hub_cloud_dev_token",
            ),
        ],
        indirect=True,
    )
    @pytest.mark.parametrize("init_embedding_function", [embedding_fn3, None])
    @pytest.mark.slow
    @requires_libdeeplake
    def test_update_embedding(
        ds,
        vector_store_hash_ids,
        vector_store_row_ids,
        vector_store_filters,
        vector_store_filter_udf,
        vector_store_query,
        init_embedding_function,
        hub_cloud_dev_token,
    ):
        vector_store_filters = vector_store_filters or vector_store_filter_udf
    
        exec_option = "compute_engine"
        if vector_store_filter_udf:
            exec_option = "python"
    
        embedding_tensor = "embedding"
        embedding_source_tensor = "text"
        # dataset has a single embedding_tensor:
    
        path = ds.path
        vector_store = DeepLakeVectorStore(
            path=path,
            overwrite=True,
            verbose=False,
            exec_option=exec_option,
            embedding_function=init_embedding_function,
            index_params={"threshold": 10},
            token=hub_cloud_dev_token,
        )
    
        # add data to the dataset:
        metadatas[1:6] = [{"a": 1} for _ in range(5)]
        vector_store.add(id=ids, embedding=embeddings, text=texts, metadata=metadatas)
    
        # case 1: single embedding_source_tensor, single embedding_tensor, single embedding_function
        new_embedding_value = 100
        embedding_fn = get_embedding_function(embedding_value=new_embedding_value)
        vector_store.update_embedding(
            ids=vector_store_hash_ids,
            row_ids=vector_store_row_ids,
            filter=vector_store_filters,
            query=vector_store_query,
            embedding_function=embedding_fn,
            embedding_source_tensor=embedding_source_tensor,
            embedding_tensor=embedding_tensor,
        )
        assert_updated_vector_store(
            new_embedding_value,
            vector_store,
            vector_store_hash_ids,
            vector_store_row_ids,
            vector_store_filters,
            vector_store_query,
            embedding_fn,
            embedding_source_tensor,
            embedding_tensor,
            exec_option,
            num_changed_samples=5,
        )
    
        # case 2: single embedding_source_tensor, single embedding_tensor not specified, single embedding_function
        new_embedding_value = 100
        embedding_fn = get_embedding_function(embedding_value=new_embedding_value)
        vector_store.update_embedding(
            ids=vector_store_hash_ids,
            row_ids=vector_store_row_ids,
            filter=vector_store_filters,
            query=vector_store_query,
            embedding_function=embedding_fn,
            embedding_source_tensor=embedding_source_tensor,
        )
        assert_updated_vector_store(
            new_embedding_value,
            vector_store,
            vector_store_hash_ids,
            vector_store_row_ids,
            vector_store_filters,
            vector_store_query,
            embedding_fn,
            embedding_source_tensor,
            embedding_tensor,
            exec_option,
            num_changed_samples=5,
        )
    
        # case 3-4: single embedding_source_tensor, single embedding_tensor, single init_embedding_function
        if init_embedding_function is None:
            # case 3: errors out when init_embedding_function is not specified
            with pytest.raises(ValueError):
                vector_store.update_embedding(
                    ids=vector_store_hash_ids,
                    row_ids=vector_store_row_ids,
                    filter=vector_store_filters,
                    query=vector_store_query,
                    embedding_source_tensor=embedding_source_tensor,
                )
        else:
            # case 4
            vector_store.update_embedding(
                ids=vector_store_hash_ids,
                row_ids=vector_store_row_ids,
                filter=vector_store_filters,
                query=vector_store_query,
                embedding_source_tensor=embedding_source_tensor,
            )
            assert_updated_vector_store(
                0,
                vector_store,
                vector_store_hash_ids,
                vector_store_row_ids,
                vector_store_filters,
                vector_store_query,
                init_embedding_function,
                embedding_source_tensor,
                embedding_tensor,
                exec_option,
                num_changed_samples=5,
            )
    
>       vector_store.delete_by_path(path, token=ds.token)

deeplake/core/vectorstore/test_deeplake_vectorstore.py:1063: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
deeplake/core/vectorstore/deeplake_vectorstore.py:490: in delete_by_path
    deeplake.delete(path, large_ok=True, token=token, force=force, creds=creds)
deeplake/util/spinner.py:151: in inner
    return func(*args, **kwargs)
deeplake/api/dataset.py:905: in delete
    ds.delete(large_ok=large_ok)
deeplake/core/dataset/deeplake_cloud_dataset.py:246: in delete
    self.client.delete_dataset_entry(self.org_id, self.ds_name)
deeplake/client/client.py:306: in delete_dataset_entry
    self.request(
deeplake/client/client.py:148: in request
    check_response_status(response)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

response = <Response [400]>

    def check_response_status(response: requests.Response):
        """Check response status and throw corresponding exception on failure."""
        code = response.status_code
        if code >= 200 and code < 300:
            return
    
        try:
            message = response.json()["description"]
        except Exception:
            message = " "
    
        if code == 400:
>           raise BadRequestException(message)
E           deeplake.util.exceptions.BadRequestException: Invalid Request. One or more request parameters is incorrect.
E           object generator can't be used in 'await' expression

deeplake/client/utils.py:56: BadRequestException

Check failure on line 189 in deeplake/core/vectorstore/deep_memory/test_deepmemory.py

test_deepmemory.test_deepmemory_evaluate

AssertionError: assert {'recall@1': ...@3': 0.5, ...} == {'recall@1': ...@3': 0.6, ...}
  Omitting 2 identical items, use -vv to show
  Differing items:
  {'recall@1': 0.3} != {'recall@1': 0.4}
  {'recall@3': 0.5} != {'recall@3': 0.6}
  {'recall@5': 0.5} != {'recall@5': 0.6}
  {'recall@100': 0.8} != {'recall@100': 0.9}
  Full diff:
    {
  -  'recall@1': 0.4,
  ?                ^
  +  'recall@1': 0.3,
  ?                ^
     'recall@10': 0.6,
  -  'recall@100': 0.9,
  ?                  ^
  +  'recall@100': 0.8,
  ?                  ^
  -  'recall@3': 0.6,
  ?                ^
  +  'recall@3': 0.5,
  ?                ^
  -  'recall@5': 0.6,
  ?                ^
  +  'recall@5': 0.5,
  ?                ^
     'recall@50': 0.7,
    }
Raw output
corpus_query_relevances_copy = ('hub://testingacc2/tmp5dc0_test_deepmemory_test_deepmemory_evaluate', ['0-dimensional biomaterials lack inductive pro...5107', 1]], [['32587939', 1]], ...], 'hub://testingacc2/tmp5dc0_test_deepmemory_test_deepmemory_evaluate_eval_queries')
questions_embeddings_and_relevances = (array([[-0.01518817,  0.02033963, -0.01228631, ..., -0.00286692,
        -0.0079668 , -0.00414979],
       [-0.003503...A treatment decreases endoplasmic reticulum stress in response to general endoplasmic reticulum stress markers.', ...])
hub_cloud_dev_token = 'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJpZCI6InRlc3RpbmdhY2MyIiwiYXBpX2tleSI6IjU4Y0tLb1p6UE1BbThPU2RpbTRiZ2tBekhWekt1VUE3MFJpNTNyZUpKRTJuaiJ9.'

    @pytest.mark.slow
    @pytest.mark.timeout(600)
    @pytest.mark.skipif(sys.platform == "win32", reason="Does not run on Windows")
    @requires_libdeeplake
    def test_deepmemory_evaluate(
        corpus_query_relevances_copy,
        questions_embeddings_and_relevances,
        hub_cloud_dev_token,
    ):
        corpus, _, _, query_path = corpus_query_relevances_copy
        (
            questions_embeddings,
            question_relevances,
            queries,
        ) = questions_embeddings_and_relevances
    
        db = VectorStore(
            corpus,
            runtime={"tensor_db": True},
            token=hub_cloud_dev_token,
        )
    
        # when qvs_params is wrong:
        with pytest.raises(ValueError):
            db.deep_memory.evaluate(
                queries=queries,
                embedding=questions_embeddings,
                relevance=question_relevances,
                qvs_params={
                    "log_queries": True,
                    "branch_name": "wrong_branch",
                },
            )
    
        # embedding_function is not provided in the constructor or in the eval method
        with pytest.raises(ValueError):
            db.deep_memory.evaluate(
                queries=queries,
                relevance=question_relevances,
                qvs_params={
                    "log_queries": True,
                    "branch_name": "wrong_branch",
                },
            )
    
        recall = db.deep_memory.evaluate(
            queries=queries,
            embedding=questions_embeddings,
            relevance=question_relevances,
            qvs_params={
                "branch": "queries",
            },
        )
    
>       assert recall["without model"] == {
            "recall@1": 0.4,
            "recall@3": 0.6,
            "recall@5": 0.6,
            "recall@10": 0.6,
            "recall@50": 0.7,
            "recall@100": 0.9,
        }
E       AssertionError: assert {'recall@1': ...@3': 0.5, ...} == {'recall@1': ...@3': 0.6, ...}
E         Omitting 2 identical items, use -vv to show
E         Differing items:
E         {'recall@1': 0.3} != {'recall@1': 0.4}
E         {'recall@3': 0.5} != {'recall@3': 0.6}
E         {'recall@5': 0.5} != {'recall@5': 0.6}
E         {'recall@100': 0.8} != {'recall@100': 0.9}
E         Full diff:
E           {
E         -  'recall@1': 0.4,
E         ?                ^
E         +  'recall@1': 0.3,
E         ?                ^
E            'recall@10': 0.6,
E         -  'recall@100': 0.9,
E         ?                  ^
E         +  'recall@100': 0.8,
E         ?                  ^
E         -  'recall@3': 0.6,
E         ?                ^
E         +  'recall@3': 0.5,
E         ?                ^
E         -  'recall@5': 0.6,
E         ?                ^
E         +  'recall@5': 0.5,
E         ?                ^
E            'recall@50': 0.7,
E           }

deeplake/core/vectorstore/deep_memory/test_deepmemory.py:189: AssertionError
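The expected values in this assertion are exact matches against metrics that come back slightly different on other runs in this report (the observed recall@1 is 0.3 here and 0.1 in a later run, against an expected 0.4), so the comparison is brittle. A minimal sketch, assuming a small absolute tolerance is acceptable for these recall figures, compares the dictionaries with pytest.approx instead of strict equality; the helper name and the 0.15 tolerance are illustrative, not part of the test suite:

    import pytest

    def assert_recalls_close(actual: dict, expected: dict, abs_tol: float = 0.15):
        # Compare recall@k dictionaries key by key within an absolute tolerance,
        # rather than requiring exact equality of floating-point metrics.
        assert actual.keys() == expected.keys()
        assert actual == pytest.approx(expected, abs=abs_tol)

    # e.g. assert_recalls_close(recall["without model"], {"recall@1": 0.4, "recall@3": 0.6, ...})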

Check failure on line 1063 in deeplake/core/vectorstore/test_deeplake_vectorstore.py

@github-actions github-actions / JUnit Test Report

test_deeplake_vectorstore.test_update_embedding[embedding_fn3-hub_cloud_ds-None-None-None-None-vector_store_query-hub_cloud_dev_token]

deeplake.util.exceptions.BadRequestException: Invalid Request. One or more request parameters is incorrect.
object generator can't be used in 'await' expression
Raw output
ds = Dataset(path='hub://testingacc2/tmp6d82_test_deeplake_vectorstore_test_update_embedding-embedding_fn3-hub_cloud_ds-None-None-None-None-vector_store_query-hub_cloud_dev_token-', tensors=[])
vector_store_hash_ids = None, vector_store_row_ids = None
vector_store_filters = None, vector_store_filter_udf = None
vector_store_query = "select * where metadata['a']==1"
init_embedding_function = <function embedding_fn3 at 0x7f1dd8fb9940>
hub_cloud_dev_token = 'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJpZCI6InRlc3RpbmdhY2MyIiwiYXBpX2tleSI6IjU4Y0tLb1p6UE1BbThPU2RpbTRiZ2tBekhWekt1VUE3MFJpNTNyZUpKRTJuaiJ9.'

    @requires_libdeeplake
    @pytest.mark.parametrize(
        "ds, vector_store_hash_ids, vector_store_row_ids, vector_store_filters, vector_store_filter_udf, vector_store_query, hub_cloud_dev_token",
        [
            (
                "local_auth_ds",
                "vector_store_hash_ids",
                None,
                None,
                None,
                None,
                "hub_cloud_dev_token",
            ),
            (
                "local_auth_ds",
                None,
                "vector_store_row_ids",
                None,
                None,
                None,
                "hub_cloud_dev_token",
            ),
            (
                "local_auth_ds",
                None,
                None,
                None,
                "vector_store_filter_udf",
                None,
                "hub_cloud_dev_token",
            ),
            (
                "local_auth_ds",
                None,
                None,
                "vector_store_filters",
                None,
                None,
                "hub_cloud_dev_token",
            ),
            (
                "hub_cloud_ds",
                None,
                None,
                None,
                None,
                "vector_store_query",
                "hub_cloud_dev_token",
            ),
        ],
        indirect=True,
    )
    @pytest.mark.parametrize("init_embedding_function", [embedding_fn3, None])
    @pytest.mark.slow
    @requires_libdeeplake
    def test_update_embedding(
        ds,
        vector_store_hash_ids,
        vector_store_row_ids,
        vector_store_filters,
        vector_store_filter_udf,
        vector_store_query,
        init_embedding_function,
        hub_cloud_dev_token,
    ):
        vector_store_filters = vector_store_filters or vector_store_filter_udf
    
        exec_option = "compute_engine"
        if vector_store_filter_udf:
            exec_option = "python"
    
        embedding_tensor = "embedding"
        embedding_source_tensor = "text"
        # dataset has a single embedding_tensor:
    
        path = ds.path
        vector_store = DeepLakeVectorStore(
            path=path,
            overwrite=True,
            verbose=False,
            exec_option=exec_option,
            embedding_function=init_embedding_function,
            index_params={"threshold": 10},
            token=hub_cloud_dev_token,
        )
    
        # add data to the dataset:
        metadatas[1:6] = [{"a": 1} for _ in range(5)]
        vector_store.add(id=ids, embedding=embeddings, text=texts, metadata=metadatas)
    
        # case 1: single embedding_source_tensor, single embedding_tensor, single embedding_function
        new_embedding_value = 100
        embedding_fn = get_embedding_function(embedding_value=new_embedding_value)
        vector_store.update_embedding(
            ids=vector_store_hash_ids,
            row_ids=vector_store_row_ids,
            filter=vector_store_filters,
            query=vector_store_query,
            embedding_function=embedding_fn,
            embedding_source_tensor=embedding_source_tensor,
            embedding_tensor=embedding_tensor,
        )
        assert_updated_vector_store(
            new_embedding_value,
            vector_store,
            vector_store_hash_ids,
            vector_store_row_ids,
            vector_store_filters,
            vector_store_query,
            embedding_fn,
            embedding_source_tensor,
            embedding_tensor,
            exec_option,
            num_changed_samples=5,
        )
    
        # case 2: single embedding_source_tensor, single embedding_tensor not specified, single embedding_function
        new_embedding_value = 100
        embedding_fn = get_embedding_function(embedding_value=new_embedding_value)
        vector_store.update_embedding(
            ids=vector_store_hash_ids,
            row_ids=vector_store_row_ids,
            filter=vector_store_filters,
            query=vector_store_query,
            embedding_function=embedding_fn,
            embedding_source_tensor=embedding_source_tensor,
        )
        assert_updated_vector_store(
            new_embedding_value,
            vector_store,
            vector_store_hash_ids,
            vector_store_row_ids,
            vector_store_filters,
            vector_store_query,
            embedding_fn,
            embedding_source_tensor,
            embedding_tensor,
            exec_option,
            num_changed_samples=5,
        )
    
        # case 3-4: single embedding_source_tensor, single embedding_tensor, single init_embedding_function
        if init_embedding_function is None:
            # case 3: errors out when init_embedding_function is not specified
            with pytest.raises(ValueError):
                vector_store.update_embedding(
                    ids=vector_store_hash_ids,
                    row_ids=vector_store_row_ids,
                    filter=vector_store_filters,
                    query=vector_store_query,
                    embedding_source_tensor=embedding_source_tensor,
                )
        else:
            # case 4
            vector_store.update_embedding(
                ids=vector_store_hash_ids,
                row_ids=vector_store_row_ids,
                filter=vector_store_filters,
                query=vector_store_query,
                embedding_source_tensor=embedding_source_tensor,
            )
            assert_updated_vector_store(
                0,
                vector_store,
                vector_store_hash_ids,
                vector_store_row_ids,
                vector_store_filters,
                vector_store_query,
                init_embedding_function,
                embedding_source_tensor,
                embedding_tensor,
                exec_option,
                num_changed_samples=5,
            )
    
>       vector_store.delete_by_path(path, token=ds.token)

deeplake/core/vectorstore/test_deeplake_vectorstore.py:1063: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
deeplake/core/vectorstore/deeplake_vectorstore.py:490: in delete_by_path
    deeplake.delete(path, large_ok=True, token=token, force=force, creds=creds)
deeplake/util/spinner.py:151: in inner
    return func(*args, **kwargs)
deeplake/api/dataset.py:905: in delete
    ds.delete(large_ok=large_ok)
deeplake/core/dataset/deeplake_cloud_dataset.py:246: in delete
    self.client.delete_dataset_entry(self.org_id, self.ds_name)
deeplake/client/client.py:306: in delete_dataset_entry
    self.request(
deeplake/client/client.py:148: in request
    check_response_status(response)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

response = <Response [400]>

    def check_response_status(response: requests.Response):
        """Check response status and throw corresponding exception on failure."""
        code = response.status_code
        if code >= 200 and code < 300:
            return
    
        try:
            message = response.json()["description"]
        except Exception:
            message = " "
    
        if code == 400:
>           raise BadRequestException(message)
E           deeplake.util.exceptions.BadRequestException: Invalid Request. One or more request parameters is incorrect.
E           object generator can't be used in 'await' expression

deeplake/client/utils.py:56: BadRequestException
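Both test_update_embedding failures in this report are raised from the final cleanup call (vector_store.delete_by_path) after the embedding assertions have already passed, so the 400 from the backend masks an otherwise green test body. A minimal sketch, assuming it is acceptable to log rather than fail on cleanup errors, routes the delete through a guarded helper; the helper name is illustrative:

    import logging

    from deeplake.util.exceptions import BadRequestException

    logger = logging.getLogger(__name__)

    def safe_delete_by_path(vector_store, path, token=None):
        # Best-effort dataset cleanup: report a rejected delete instead of failing the run.
        try:
            vector_store.delete_by_path(path, token=token)
        except BadRequestException as exc:
            logger.warning("Cleanup of %s was rejected by the backend: %s", path, exc)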

Check failure on line 189 in deeplake/core/vectorstore/deep_memory/test_deepmemory.py

@github-actions github-actions / JUnit Test Report

test_deepmemory.test_deepmemory_evaluate

AssertionError: assert {'recall@1': ...@3': 0.5, ...} == {'recall@1': ...@3': 0.6, ...}
  Omitting 3 identical items, use -vv to show
  Differing items:
  {'recall@1': 0.3} != {'recall@1': 0.4}
  {'recall@3': 0.5} != {'recall@3': 0.6}
  {'recall@50': 0.6} != {'recall@50': 0.7}
  Full diff:
    {
  -  'recall@1': 0.4,
  ?                ^
  +  'recall@1': 0.3,
  ?                ^
     'recall@10': 0.6,
     'recall@100': 0.9,
  -  'recall@3': 0.6,
  ?                ^
  +  'recall@3': 0.5,
  ?                ^
     'recall@5': 0.6,
  -  'recall@50': 0.7,
  ?                 ^
  +  'recall@50': 0.6,
  ?                 ^
    }
Raw output
corpus_query_relevances_copy = ('hub://testingacc2/tmp6d82_test_deepmemory_test_deepmemory_evaluate', ['0-dimensional biomaterials lack inductive pro...5107', 1]], [['32587939', 1]], ...], 'hub://testingacc2/tmp6d82_test_deepmemory_test_deepmemory_evaluate_eval_queries')
questions_embeddings_and_relevances = (array([[-0.01518817,  0.02033963, -0.01228631, ..., -0.00286692,
        -0.0079668 , -0.00414979],
       [-0.003503...A treatment decreases endoplasmic reticulum stress in response to general endoplasmic reticulum stress markers.', ...])
hub_cloud_dev_token = 'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJpZCI6InRlc3RpbmdhY2MyIiwiYXBpX2tleSI6IjU4Y0tLb1p6UE1BbThPU2RpbTRiZ2tBekhWekt1VUE3MFJpNTNyZUpKRTJuaiJ9.'

    @pytest.mark.slow
    @pytest.mark.timeout(600)
    @pytest.mark.skipif(sys.platform == "win32", reason="Does not run on Windows")
    @requires_libdeeplake
    def test_deepmemory_evaluate(
        corpus_query_relevances_copy,
        questions_embeddings_and_relevances,
        hub_cloud_dev_token,
    ):
        corpus, _, _, query_path = corpus_query_relevances_copy
        (
            questions_embeddings,
            question_relevances,
            queries,
        ) = questions_embeddings_and_relevances
    
        db = VectorStore(
            corpus,
            runtime={"tensor_db": True},
            token=hub_cloud_dev_token,
        )
    
        # when qvs_params is wrong:
        with pytest.raises(ValueError):
            db.deep_memory.evaluate(
                queries=queries,
                embedding=questions_embeddings,
                relevance=question_relevances,
                qvs_params={
                    "log_queries": True,
                    "branch_name": "wrong_branch",
                },
            )
    
        # embedding_function is not provided in the constructor or in the eval method
        with pytest.raises(ValueError):
            db.deep_memory.evaluate(
                queries=queries,
                relevance=question_relevances,
                qvs_params={
                    "log_queries": True,
                    "branch_name": "wrong_branch",
                },
            )
    
        recall = db.deep_memory.evaluate(
            queries=queries,
            embedding=questions_embeddings,
            relevance=question_relevances,
            qvs_params={
                "branch": "queries",
            },
        )
    
>       assert recall["without model"] == {
            "recall@1": 0.4,
            "recall@3": 0.6,
            "recall@5": 0.6,
            "recall@10": 0.6,
            "recall@50": 0.7,
            "recall@100": 0.9,
        }
E       AssertionError: assert {'recall@1': ...@3': 0.5, ...} == {'recall@1': ...@3': 0.6, ...}
E         Omitting 3 identical items, use -vv to show
E         Differing items:
E         {'recall@1': 0.3} != {'recall@1': 0.4}
E         {'recall@3': 0.5} != {'recall@3': 0.6}
E         {'recall@50': 0.6} != {'recall@50': 0.7}
E         Full diff:
E           {
E         -  'recall@1': 0.4,
E         ?                ^
E         +  'recall@1': 0.3,
E         ?                ^
E            'recall@10': 0.6,
E            'recall@100': 0.9,
E         -  'recall@3': 0.6,
E         ?                ^
E         +  'recall@3': 0.5,
E         ?                ^
E            'recall@5': 0.6,
E         -  'recall@50': 0.7,
E         ?                 ^
E         +  'recall@50': 0.6,
E         ?                 ^
E           }

deeplake/core/vectorstore/deep_memory/test_deepmemory.py:189: AssertionError
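This is the same exact-equality assertion failing again with a different combination of keys off by 0.1 (recall@50 this time, instead of recall@5 and recall@100), which points at run-to-run nondeterminism rather than a single wrong expectation. If rerunning is preferred over loosening the assertion, a hypothetical sketch using the pytest-rerunfailures plugin (which provides pytest.mark.flaky(reruns=...)) is shown below; the toy test only stands in for the nondeterministic evaluation and assumes that plugin is installed:

    import pytest

    _attempts = {"count": 0}

    # Hypothetical: requires the pytest-rerunfailures plugin for pytest.mark.flaky.
    @pytest.mark.flaky(reruns=2)
    def test_passes_on_rerun():
        # Fails on the first attempt and passes on a rerun, standing in for a
        # recall evaluation whose exact values vary between runs.
        _attempts["count"] += 1
        assert _attempts["count"] > 1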

Check failure on line 1292 in deeplake/core/vectorstore/test_deeplake_vectorstore.py

@github-actions github-actions / JUnit Test Report

test_deeplake_vectorstore.test_update_embedding[None-hub_cloud_ds-None-None-None-None-vector_store_query-hub_cloud_dev_token]

deeplake.util.exceptions.BadRequestException: Invalid Request. One or more request parameters is incorrect.
object generator can't be used in 'await' expression
Raw output
ds = Dataset(path='hub://testingacc2/tmp8196_test_deeplake_vectorstore_test_update_embedding-None-hub_cloud_ds-None-None-None-None-vector_store_query-hub_cloud_dev_token-', tensors=[])
vector_store_hash_ids = None, vector_store_row_ids = None
vector_store_filters = None, vector_store_filter_udf = None
vector_store_query = "select * where metadata['a']==1"
init_embedding_function = None
hub_cloud_dev_token = 'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJpZCI6InRlc3RpbmdhY2MyIiwiYXBpX2tleSI6IjU4Y0tLb1p6UE1BbThPU2RpbTRiZ2tBekhWekt1VUE3MFJpNTNyZUpKRTJuaiJ9.'

    @requires_libdeeplake
    @pytest.mark.parametrize(
        "ds, vector_store_hash_ids, vector_store_row_ids, vector_store_filters, vector_store_filter_udf, vector_store_query, hub_cloud_dev_token",
        [
            (
                "local_auth_ds",
                "vector_store_hash_ids",
                None,
                None,
                None,
                None,
                "hub_cloud_dev_token",
            ),
            (
                "local_auth_ds",
                None,
                "vector_store_row_ids",
                None,
                None,
                None,
                "hub_cloud_dev_token",
            ),
            (
                "local_auth_ds",
                None,
                None,
                None,
                "vector_store_filter_udf",
                None,
                "hub_cloud_dev_token",
            ),
            (
                "local_auth_ds",
                None,
                None,
                "vector_store_filters",
                None,
                None,
                "hub_cloud_dev_token",
            ),
            (
                "hub_cloud_ds",
                None,
                None,
                None,
                None,
                "vector_store_query",
                "hub_cloud_dev_token",
            ),
        ],
        indirect=True,
    )
    @pytest.mark.parametrize("init_embedding_function", [embedding_fn3, None])
    @pytest.mark.slow
    @requires_libdeeplake
    def test_update_embedding(
        ds,
        vector_store_hash_ids,
        vector_store_row_ids,
        vector_store_filters,
        vector_store_filter_udf,
        vector_store_query,
        init_embedding_function,
        hub_cloud_dev_token,
    ):
        vector_store_filters = vector_store_filters or vector_store_filter_udf
    
        exec_option = "compute_engine"
        if vector_store_filter_udf:
            exec_option = "python"
    
        embedding_tensor = "embedding"
        embedding_source_tensor = "text"
        # dataset has a single embedding_tensor:
    
        path = ds.path
        vector_store = DeepLakeVectorStore(
            path=path,
            overwrite=True,
            verbose=False,
            exec_option=exec_option,
            embedding_function=init_embedding_function,
            index_params={"threshold": 10},
            token=hub_cloud_dev_token,
        )
    
        # add data to the dataset:
        metadatas[1:6] = [{"a": 1} for _ in range(5)]
        vector_store.add(id=ids, embedding=embeddings, text=texts, metadata=metadatas)
    
        # case 1: single embedding_source_tensor, single embedding_tensor, single embedding_function
        new_embedding_value = 100
        embedding_fn = get_embedding_function(embedding_value=new_embedding_value)
        vector_store.update_embedding(
            ids=vector_store_hash_ids,
            row_ids=vector_store_row_ids,
            filter=vector_store_filters,
            query=vector_store_query,
            embedding_function=embedding_fn,
            embedding_source_tensor=embedding_source_tensor,
            embedding_tensor=embedding_tensor,
        )
        assert_updated_vector_store(
            new_embedding_value,
            vector_store,
            vector_store_hash_ids,
            vector_store_row_ids,
            vector_store_filters,
            vector_store_query,
            embedding_fn,
            embedding_source_tensor,
            embedding_tensor,
            exec_option,
            num_changed_samples=5,
        )
    
        # case 2: single embedding_source_tensor, single embedding_tensor not specified, single embedding_function
        new_embedding_value = 100
        embedding_fn = get_embedding_function(embedding_value=new_embedding_value)
        vector_store.update_embedding(
            ids=vector_store_hash_ids,
            row_ids=vector_store_row_ids,
            filter=vector_store_filters,
            query=vector_store_query,
            embedding_function=embedding_fn,
            embedding_source_tensor=embedding_source_tensor,
        )
        assert_updated_vector_store(
            new_embedding_value,
            vector_store,
            vector_store_hash_ids,
            vector_store_row_ids,
            vector_store_filters,
            vector_store_query,
            embedding_fn,
            embedding_source_tensor,
            embedding_tensor,
            exec_option,
            num_changed_samples=5,
        )
    
        # case 3-4: single embedding_source_tensor, single embedding_tensor, single init_embedding_function
        if init_embedding_function is None:
            # case 3: errors out when init_embedding_function is not specified
            with pytest.raises(ValueError):
                vector_store.update_embedding(
                    ids=vector_store_hash_ids,
                    row_ids=vector_store_row_ids,
                    filter=vector_store_filters,
                    query=vector_store_query,
                    embedding_source_tensor=embedding_source_tensor,
                )
        else:
            # case 4
            vector_store.update_embedding(
                ids=vector_store_hash_ids,
                row_ids=vector_store_row_ids,
                filter=vector_store_filters,
                query=vector_store_query,
                embedding_source_tensor=embedding_source_tensor,
            )
            assert_updated_vector_store(
                0,
                vector_store,
                vector_store_hash_ids,
                vector_store_row_ids,
                vector_store_filters,
                vector_store_query,
                init_embedding_function,
                embedding_source_tensor,
                embedding_tensor,
                exec_option,
                num_changed_samples=5,
            )
    
        vector_store.delete_by_path(path, token=ds.token)
    
        # dataset has a multiple embedding_tensor:
        tensors = [
            {
                "name": "text",
                "htype": "text",
                "create_id_tensor": False,
                "create_sample_info_tensor": False,
                "create_shape_tensor": False,
            },
            {
                "name": "metadata",
                "htype": "json",
                "create_id_tensor": False,
                "create_sample_info_tensor": False,
                "create_shape_tensor": False,
            },
            {
                "name": "embedding",
                "htype": "embedding",
                "dtype": np.float32,
                "create_id_tensor": False,
                "create_sample_info_tensor": False,
                "create_shape_tensor": True,
                "max_chunk_size": 64 * MB,
            },
            {
                "name": "embedding_md",
                "htype": "embedding",
                "dtype": np.float32,
                "create_id_tensor": False,
                "create_sample_info_tensor": False,
                "create_shape_tensor": True,
                "max_chunk_size": 64 * MB,
            },
            {
                "name": "id",
                "htype": "text",
                "create_id_tensor": False,
                "create_sample_info_tensor": False,
                "create_shape_tensor": False,
            },
        ]
        multiple_embedding_tensor = ["embedding", "embedding_md"]
        multiple_embedding_source_tensor = ["embedding", "metadata"]
        vector_store = DeepLakeVectorStore(
            path=path + "_multi",
            overwrite=True,
            verbose=False,
            embedding_function=init_embedding_function,
            tensor_params=tensors,
            token=ds.token,
            exec_option=exec_option,
        )
    
        vector_store.add(
            id=ids,
            text=texts,
            embedding=embeddings,
            embedding_md=embeddings,
            metadata=metadatas,
        )
    
        # case 1: multiple embedding_source_tensor, single embedding_tensor, single embedding_function
        new_embedding_value = [100, 200]
        embedding_fn = get_multiple_embedding_function(new_embedding_value)
        with pytest.raises(ValueError):
            vector_store.update_embedding(
                ids=vector_store_hash_ids,
                row_ids=vector_store_row_ids,
                filter=vector_store_filters,
                query=vector_store_query,
                embedding_function=embedding_function,
                embedding_source_tensor=multiple_embedding_source_tensor,
                embedding_tensor=embedding_tensor,
            )
    
        # case 2: multiple embedding_source_tensor, single embedding_tensor, multiple embedding_function -> error out?
        with pytest.raises(ValueError):
            vector_store.update_embedding(
                ids=vector_store_hash_ids,
                row_ids=vector_store_row_ids,
                filter=vector_store_filters,
                query=vector_store_query,
                embedding_function=embedding_fn,
                embedding_source_tensor=multiple_embedding_source_tensor,
                embedding_tensor=embedding_tensor,
            )
    
        # case 3: 4 embedding_source_tensor, 2 embedding_tensor, 2 embedding_function
        with pytest.raises(ValueError):
            vector_store.update_embedding(
                ids=vector_store_hash_ids,
                row_ids=vector_store_row_ids,
                filter=vector_store_filters,
                query=vector_store_query,
                embedding_function=embedding_fn,
                embedding_source_tensor=multiple_embedding_source_tensor * 2,
                embedding_tensor=embedding_tensor,
            )
    
        # case 4: multiple embedding_source_tensor, multiple embedding_tensor, multiple embedding_function
        new_embedding_value = [100, 200]
        embedding_fn = get_multiple_embedding_function(new_embedding_value)
        vector_store.update_embedding(
            ids=vector_store_hash_ids,
            row_ids=vector_store_row_ids,
            filter=vector_store_filters,
            query=vector_store_query,
            embedding_function=embedding_fn,
            embedding_source_tensor=multiple_embedding_source_tensor,
            embedding_tensor=multiple_embedding_tensor,
        )
    
        assert_updated_vector_store(
            new_embedding_value,
            vector_store,
            vector_store_hash_ids,
            vector_store_row_ids,
            vector_store_filters,
            vector_store_query,
            embedding_fn,
            multiple_embedding_source_tensor,
            multiple_embedding_tensor,
            exec_option,
            num_changed_samples=5,
        )
    
        # case 5-6: multiple embedding_source_tensor, multiple embedding_tensor, single init_embedding_function
        new_embedding_value = [0, 0]
    
        if init_embedding_function is None:
            with pytest.raises(ValueError):
                # case 5: error out because no embedding function was specified
                vector_store.update_embedding(
                    ids=vector_store_hash_ids,
                    row_ids=vector_store_row_ids,
                    filter=vector_store_filters,
                    query=vector_store_query,
                    embedding_source_tensor=multiple_embedding_source_tensor,
                    embedding_tensor=multiple_embedding_tensor,
                )
        else:
            # case 6
            vector_store.update_embedding(
                ids=vector_store_hash_ids,
                row_ids=vector_store_row_ids,
                filter=vector_store_filters,
                query=vector_store_query,
                embedding_source_tensor=multiple_embedding_source_tensor,
                embedding_tensor=multiple_embedding_tensor,
            )
            assert_updated_vector_store(
                new_embedding_value,
                vector_store,
                vector_store_hash_ids,
                vector_store_row_ids,
                vector_store_filters,
                vector_store_query,
                embedding_fn3,
                multiple_embedding_source_tensor,
                multiple_embedding_tensor,
                exec_option,
                num_changed_samples=5,
            )
    
        # case 7: multiple embedding_source_tensor, not specified embedding_tensor, multiple embedding_function -> error out?
        with pytest.raises(ValueError):
            vector_store.update_embedding(
                ids=vector_store_hash_ids,
                row_ids=vector_store_row_ids,
                filter=vector_store_filters,
                query=vector_store_query,
                embedding_source_tensor=multiple_embedding_source_tensor,
                embedding_function=embedding_fn,
            )
    
        # case 8-9: single embedding_source_tensor, multiple embedding_tensor, single init_embedding_function
        with pytest.raises(ValueError):
            # case 8: error out because embedding_function is not specified during init call and update call
            vector_store.update_embedding(
                ids=vector_store_hash_ids,
                row_ids=vector_store_row_ids,
                filter=vector_store_filters,
                query=vector_store_query,
                embedding_source_tensor=embedding_source_tensor,
                embedding_function=embedding_fn,
            )
    
        # case 10: single embedding_source_tensor, multiple embedding_tensor,  multiple embedding_function -> error out?
        with pytest.raises(ValueError):
            # error out because single embedding_source_tensor is specified
            vector_store.update_embedding(
                ids=vector_store_hash_ids,
                row_ids=vector_store_row_ids,
                filter=vector_store_filters,
                query=vector_store_query,
                embedding_source_tensor=embedding_source_tensor,
                embedding_tensor=multiple_embedding_tensor,
                embedding_function=embedding_fn,
            )
    
        # case 11: single embedding_source_tensor, single embedding_tensor, single embedding_function, single init_embedding_function
        new_embedding_value = 300
        embedding_fn = get_embedding_function(new_embedding_value)
        vector_store.update_embedding(
            ids=vector_store_hash_ids,
            row_ids=vector_store_row_ids,
            filter=vector_store_filters,
            query=vector_store_query,
            embedding_source_tensor=embedding_source_tensor,
            embedding_tensor=embedding_tensor,
            embedding_function=embedding_fn,
        )
    
        assert_updated_vector_store(
            new_embedding_value,
            vector_store,
            vector_store_hash_ids,
            vector_store_row_ids,
            vector_store_filters,
            vector_store_query,
            embedding_function,
            embedding_source_tensor,
            embedding_tensor,
            exec_option,
            num_changed_samples=5,
        )
>       vector_store.delete_by_path(path + "_multi", token=ds.token)

deeplake/core/vectorstore/test_deeplake_vectorstore.py:1292: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
deeplake/core/vectorstore/deeplake_vectorstore.py:490: in delete_by_path
    deeplake.delete(path, large_ok=True, token=token, force=force, creds=creds)
deeplake/util/spinner.py:151: in inner
    return func(*args, **kwargs)
deeplake/api/dataset.py:905: in delete
    ds.delete(large_ok=large_ok)
deeplake/core/dataset/deeplake_cloud_dataset.py:246: in delete
    self.client.delete_dataset_entry(self.org_id, self.ds_name)
deeplake/client/client.py:306: in delete_dataset_entry
    self.request(
deeplake/client/client.py:148: in request
    check_response_status(response)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

response = <Response [400]>

    def check_response_status(response: requests.Response):
        """Check response status and throw corresponding exception on failure."""
        code = response.status_code
        if code >= 200 and code < 300:
            return
    
        try:
            message = response.json()["description"]
        except Exception:
            message = " "
    
        if code == 400:
>           raise BadRequestException(message)
E           deeplake.util.exceptions.BadRequestException: Invalid Request. One or more request parameters is incorrect.
E           object generator can't be used in 'await' expression

deeplake/client/utils.py:56: BadRequestException
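The call chain for this failure is vector_store.delete_by_path -> deeplake.delete -> delete_dataset_entry, and the 400 body ("object generator can't be used in 'await' expression") reads like a backend error surfaced as a client-side bad request. A minimal repro sketch outside pytest, assuming a valid dev token and an existing hub:// dataset path (both placeholders below), is:

    import deeplake
    from deeplake.util.exceptions import BadRequestException

    PATH = "hub://testingacc2/some_existing_dataset"  # placeholder dataset path
    TOKEN = "<dev token>"                             # placeholder token

    try:
        deeplake.delete(PATH, large_ok=True, token=TOKEN)
    except BadRequestException as exc:
        print("Backend rejected the delete:", exc)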

Check failure on line 189 in deeplake/core/vectorstore/deep_memory/test_deepmemory.py

@github-actions github-actions / JUnit Test Report

test_deepmemory.test_deepmemory_evaluate

AssertionError: assert {'recall@1': ...@3': 0.2, ...} == {'recall@1': ...@3': 0.6, ...}
  Omitting 2 identical items, use -vv to show
  Differing items:
  {'recall@10': 0.5} != {'recall@10': 0.6}
  {'recall@3': 0.2} != {'recall@3': 0.6}
  {'recall@1': 0.1} != {'recall@1': 0.4}
  {'recall@5': 0.4} != {'recall@5': 0.6}
  Full diff:
    {
  -  'recall@1': 0.4,
  ?                ^
  +  'recall@1': 0.1,
  ?                ^
  -  'recall@10': 0.6,
  ?                 ^
  +  'recall@10': 0.5,
  ?                 ^
     'recall@100': 0.9,
  -  'recall@3': 0.6,
  ?                ^
  +  'recall@3': 0.2,
  ?                ^
  -  'recall@5': 0.6,
  ?                ^
  +  'recall@5': 0.4,
  ?                ^
     'recall@50': 0.7,
    }
Raw output
corpus_query_relevances_copy = ('hub://testingacc2/tmp8196_test_deepmemory_test_deepmemory_evaluate', ['0-dimensional biomaterials lack inductive pro...5107', 1]], [['32587939', 1]], ...], 'hub://testingacc2/tmp8196_test_deepmemory_test_deepmemory_evaluate_eval_queries')
questions_embeddings_and_relevances = (array([[-0.01518817,  0.02033963, -0.01228631, ..., -0.00286692,
        -0.0079668 , -0.00414979],
       [-0.003503...A treatment decreases endoplasmic reticulum stress in response to general endoplasmic reticulum stress markers.', ...])
hub_cloud_dev_token = 'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJpZCI6InRlc3RpbmdhY2MyIiwiYXBpX2tleSI6IjU4Y0tLb1p6UE1BbThPU2RpbTRiZ2tBekhWekt1VUE3MFJpNTNyZUpKRTJuaiJ9.'

    @pytest.mark.slow
    @pytest.mark.timeout(600)
    @pytest.mark.skipif(sys.platform == "win32", reason="Does not run on Windows")
    @requires_libdeeplake
    def test_deepmemory_evaluate(
        corpus_query_relevances_copy,
        questions_embeddings_and_relevances,
        hub_cloud_dev_token,
    ):
        corpus, _, _, query_path = corpus_query_relevances_copy
        (
            questions_embeddings,
            question_relevances,
            queries,
        ) = questions_embeddings_and_relevances
    
        db = VectorStore(
            corpus,
            runtime={"tensor_db": True},
            token=hub_cloud_dev_token,
        )
    
        # when qvs_params is wrong:
        with pytest.raises(ValueError):
            db.deep_memory.evaluate(
                queries=queries,
                embedding=questions_embeddings,
                relevance=question_relevances,
                qvs_params={
                    "log_queries": True,
                    "branch_name": "wrong_branch",
                },
            )
    
        # embedding_function is not provided in the constructor or in the eval method
        with pytest.raises(ValueError):
            db.deep_memory.evaluate(
                queries=queries,
                relevance=question_relevances,
                qvs_params={
                    "log_queries": True,
                    "branch_name": "wrong_branch",
                },
            )
    
        recall = db.deep_memory.evaluate(
            queries=queries,
            embedding=questions_embeddings,
            relevance=question_relevances,
            qvs_params={
                "branch": "queries",
            },
        )
    
>       assert recall["without model"] == {
            "recall@1": 0.4,
            "recall@3": 0.6,
            "recall@5": 0.6,
            "recall@10": 0.6,
            "recall@50": 0.7,
            "recall@100": 0.9,
        }
E       AssertionError: assert {'recall@1': ...@3': 0.2, ...} == {'recall@1': ...@3': 0.6, ...}
E         Omitting 2 identical items, use -vv to show
E         Differing items:
E         {'recall@10': 0.5} != {'recall@10': 0.6}
E         {'recall@3': 0.2} != {'recall@3': 0.6}
E         {'recall@1': 0.1} != {'recall@1': 0.4}
E         {'recall@5': 0.4} != {'recall@5': 0.6}
E         Full diff:
E           {
E         -  'recall@1': 0.4,
E         ?                ^
E         +  'recall@1': 0.1,
E         ?                ^
E         -  'recall@10': 0.6,
E         ?                 ^
E         +  'recall@10': 0.5,
E         ?                 ^
E            'recall@100': 0.9,
E         -  'recall@3': 0.6,
E         ?                ^
E         +  'recall@3': 0.2,
E         ?                ^
E         -  'recall@5': 0.6,
E         ?                ^
E         +  'recall@5': 0.4,
E         ?                ^
E            'recall@50': 0.7,
E           }

deeplake/core/vectorstore/deep_memory/test_deepmemory.py:189: AssertionError
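Across the three test_deepmemory_evaluate failures above, every recall@k value moves while the shape of the result stays the same, so a value-independent sanity check may be more stable than pinned constants. A minimal sketch is below, assuming only that recall@k should never decrease as k grows; the helper name is illustrative:

    def assert_recall_monotonic(recall: dict) -> None:
        # Recall cannot drop as the cutoff k grows; this holds whatever the exact values are.
        ks = sorted(int(key.split("@")[1]) for key in recall)
        values = [recall[f"recall@{k}"] for k in ks]
        assert all(a <= b for a, b in zip(values, values[1:])), values

    # e.g. assert_recall_monotonic(recall["without model"])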