Add Alibaba Cloud Bailian index integration #13378

Merged
@@ -0,0 +1,156 @@
llama_index/_static
.DS_Store
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
bin/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
etc/
include/
lib/
lib64/
parts/
sdist/
share/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
.ruff_cache

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints
notebooks/

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
pyvenv.cfg

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# Jetbrains
.idea
modules/
*.swp

# VsCode
.vscode

# pipenv
Pipfile
Pipfile.lock

# pyright
pyrightconfig.json

# local test file
tests/test_local.py
@@ -0,0 +1,3 @@
poetry_requirements(
    name="poetry",
)
@@ -0,0 +1,17 @@
GIT_ROOT ?= $(shell git rev-parse --show-toplevel)

help: ## Show all Makefile targets.
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'

format: ## Run code autoformatters (black).
	pre-commit install
	git ls-files | xargs pre-commit run black --files

lint: ## Run linters: pre-commit (black, ruff, codespell) and mypy.
	pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files

test: ## Run tests via pytest.
	pytest tests

watch-docs: ## Build and watch documentation.
	sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
@@ -0,0 +1,46 @@
# LlamaIndex Indices Integration: Managed-Dashscope

## Installation

```shell
pip install llama-index-indices-managed-dashscope
```

## Usage

```python
import os

from llama_index.indices.managed.dashscope import DashScopeCloudIndex
from llama_index.readers.dashscope.base import DashScopeParse
from llama_index.readers.dashscope.utils import ResultType

os.environ["DASHSCOPE_API_KEY"] = "your_api_key_here"
os.environ["DASHSCOPE_WORKSPACE_ID"] = "your_workspace_here"

file_list = [
    # your files (doc, docx, and pdf are accepted)
]

# parse the files into documents
parse = DashScopeParse(result_type=ResultType.DASHSCOPE_DOCMIND)
documents = parse.load_data(file_path=file_list)

# create a new index
index = DashScopeCloudIndex.from_documents(
    documents,
    "my_first_index",
    verbose=True,
)

# # connect to an existing index
# index = DashScopeCloudIndex("my_first_index")

# retrieve nodes for a query
retriever = index.as_retriever()
nodes = retriever.retrieve("test query")
print(nodes)
```
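
For quick inspection of the results, here is a minimal sketch, assuming the retriever returns the standard llama-index `NodeWithScore` objects:

```python
# print each retrieved node's relevance score and a snippet of its text
for node_with_score in nodes:
    print(node_with_score.score, node_with_score.node.get_content()[:200])
```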
@@ -0,0 +1 @@
python_sources()
@@ -0,0 +1,5 @@
from llama_index.indices.managed.dashscope.base import DashScopeCloudIndex
from llama_index.indices.managed.dashscope.retriever import DashScopeCloudRetriever


__all__ = ["DashScopeCloudIndex", "DashScopeCloudRetriever"]
@@ -0,0 +1,122 @@
import json
from typing import Dict, List, Optional

from llama_index.indices.managed.dashscope.transformations import (
    DashScopeConfiguredTransformation,
)
from llama_index.core.schema import BaseNode, TransformComponent


def default_transformations() -> List[TransformComponent]:
    """Default transformations."""
    from llama_index.node_parser.dashscope import DashScopeJsonNodeParser
    from llama_index.embeddings.dashscope import (
        DashScopeEmbedding,
        DashScopeTextEmbeddingModels,
        DashScopeTextEmbeddingType,
    )

    # parse JSON documents into nodes, then embed the nodes for indexing
    node_parser = DashScopeJsonNodeParser()
    document_embedder = DashScopeEmbedding(
        model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V2,
        text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
    )
    return [
        node_parser,
        document_embedder,
    ]


def get_pipeline_create(
    name: str,
    transformations: Optional[List[TransformComponent]] = None,
    documents: Optional[List[BaseNode]] = None,
) -> dict:
    """Build the request body for creating a DashScope managed index pipeline."""
    # fall back to module defaults so the Optional parameters are safe to omit
    if transformations is None:
        transformations = default_transformations()
    if documents is None:
        documents = []

    configured_transformations: List[DashScopeConfiguredTransformation] = []
    for transformation in transformations:
        try:
            configured_transformations.append(
                DashScopeConfiguredTransformation.from_component(transformation)
            )
        except ValueError:
            raise ValueError(f"Unsupported transformation: {type(transformation)}")

    configured_transformation_items: List[Dict] = []
    for item in configured_transformations:
        configured_transformation_items.append(
            {
                "component": json.loads(item.component.json()),
                "configurable_transformation_type": item.configurable_transformation_type.name,
            }
        )
    data_sources = [
        {
            "source_type": "DATA_CENTER_FILE",
            "component": {
                "doc_ids": [doc.node_id for doc in documents],
            },
        }
    ]
    return {
        "name": name,
        "pipeline_type": "MANAGED_SHARED",
        "configured_transformations": configured_transformation_items,
        "data_sources": data_sources,
        "data_sinks": [
            {
                "sink_type": "ES",
            }
        ],
        # for debug
        "data_type": "structured",
        "config_model": "recommend",
    }
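

# Illustrative sketch (shape derived from the code above, not from the
# DashScope API docs; placeholder values are hypothetical): with the default
# transformations and one document, get_pipeline_create("my_first_index",
# documents=[doc]) returns roughly:
#     {
#         "name": "my_first_index",
#         "pipeline_type": "MANAGED_SHARED",
#         "configured_transformations": [<node parser item>, <embedder item>],
#         "data_sources": [
#             {
#                 "source_type": "DATA_CENTER_FILE",
#                 "component": {"doc_ids": ["<node_id>"]},
#             }
#         ],
#         "data_sinks": [{"sink_type": "ES"}],
#         "data_type": "structured",
#         "config_model": "recommend",
#     }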


def get_doc_insert(
    transformations: Optional[List[TransformComponent]] = None,
    documents: Optional[List[BaseNode]] = None,
) -> dict:
    """Build the request body for inserting documents into an existing pipeline."""
    # fall back to module defaults so the Optional parameters are safe to omit
    if transformations is None:
        transformations = default_transformations()
    if documents is None:
        documents = []

    configured_transformations: List[DashScopeConfiguredTransformation] = []
    for transformation in transformations:
        try:
            configured_transformations.append(
                DashScopeConfiguredTransformation.from_component(transformation)
            )
        except ValueError:
            raise ValueError(f"Unsupported transformation: {type(transformation)}")

    configured_transformation_items: List[Dict] = []
    for item in configured_transformations:
        configured_transformation_items.append(
            {
                "component": json.loads(item.component.json()),
                "configurable_transformation_type": item.configurable_transformation_type.name,
            }
        )
    data_sources = [
        {
            "source_type": "DATA_CENTER_FILE",
            "component": {
                "doc_ids": [doc.node_id for doc in documents],
            },
        }
    ]
    return {
        "configured_transformations": configured_transformation_items,
        "data_sources": data_sources,
    }


def get_doc_delete(ref_doc_ids: List[str]) -> dict:
    """Build the request body for deleting documents from the pipeline."""
    data_sources = [
        {
            "source_type": "DATA_CENTER_FILE",
            "component": {
                "doc_ids": ref_doc_ids,
            },
        }
    ]
    return {
        "data_sources": data_sources,
    }
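

# Illustrative example (derived from the code above, not from the API docs):
#     get_doc_delete(["doc-1", "doc-2"])
#     # -> {
#     #     "data_sources": [
#     #         {
#     #             "source_type": "DATA_CENTER_FILE",
#     #             "component": {"doc_ids": ["doc-1", "doc-2"]},
#     #         }
#     #     ]
#     # }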