feat (documents): add LarkSuite document loader #6420

Merged · 5 commits · Jun 28, 2023
@@ -0,0 +1,103 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "33205b12",
"metadata": {},
"source": [
"# LarkSuite (FeiShu)\n",
"\n",
">[LarkSuite](https://www.larksuite.com/) is an enterprise collaboration platform developed by ByteDance.\n",
"\n",
"This notebook covers how to load data from the `LarkSuite` REST API into a format that can be ingested into LangChain, along with example usage for text summarization.\n",
"\n",
"The LarkSuite API requires an access token (tenant_access_token or user_access_token), checkout [LarkSuite open platform document](https://open.larksuite.com/document) for API details."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "90b69c94",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-19T10:05:03.645161Z",
"start_time": "2023-06-19T10:04:49.541968Z"
},
"tags": []
},
"outputs": [],
"source": [
"from getpass import getpass\n",
"from langchain.document_loaders.larksuite import LarkSuiteDocLoader\n",
"\n",
"DOMAIN = input(\"larksuite domain\")\n",
"ACCESS_TOKEN = getpass(\"larksuite tenant_access_token or user_access_token\")\n",
"DOCUMENT_ID = input(\"larksuite document id\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "13deb0f5",
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-19T10:05:36.016495Z",
"start_time": "2023-06-19T10:05:35.360884Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[Document(page_content='Test Doc\\nThis is a Test Doc\\n\\n1\\n2\\n3\\n\\n', metadata={'document_id': 'V76kdbd2HoBbYJxdiNNccajunPf', 'revision_id': 11, 'title': 'Test Doc'})]\n"
]
}
],
"source": [
"from pprint import pprint\n",
"\n",
"larksuite_loader = LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID)\n",
"docs = larksuite_loader.load()\n",
"\n",
"pprint(docs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9ccc1e2f",
"metadata": {},
"outputs": [],
"source": [
"# see https://python.langchain.com/docs/use_cases/summarization for more details\n",
"from langchain.chains.summarize import load_summarize_chain\n",
"\n",
"chain = load_summarize_chain(llm, chain_type=\"map_reduce\")\n",
"chain.run(docs)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
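The notebook above assumes an access token is already available. As background for reviewers (not part of this PR), here is a rough sketch of how a tenant_access_token for an internal app can be obtained; the auth endpoint path, request body, and response field are assumptions based on the LarkSuite open platform docs and should be verified there before use.

```python
# Illustrative sketch only (not part of this PR): fetch a tenant_access_token
# for an internal LarkSuite app. Endpoint path and response fields are assumed
# from the LarkSuite open platform docs; verify before relying on them.
import json
import urllib.request


def get_tenant_access_token(domain: str, app_id: str, app_secret: str) -> str:
    url = f"{domain}/open-apis/auth/v3/tenant_access_token/internal"
    payload = json.dumps({"app_id": app_id, "app_secret": app_secret}).encode()
    request = urllib.request.Request(
        url, data=payload, headers={"Content-Type": "application/json"}
    )
    with urllib.request.urlopen(request) as response:
        data = json.loads(response.read().decode())
    return data["tenant_access_token"]


# Hypothetical usage:
# token = get_tenant_access_token("https://open.larksuite.com", "cli_xxx", "xxx")
```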
2 changes: 2 additions & 0 deletions langchain/document_loaders/__init__.py
@@ -63,6 +63,7 @@
from langchain.document_loaders.iugu import IuguLoader
from langchain.document_loaders.joplin import JoplinLoader
from langchain.document_loaders.json_loader import JSONLoader
from langchain.document_loaders.larksuite import LarkSuiteDocLoader
from langchain.document_loaders.markdown import UnstructuredMarkdownLoader
from langchain.document_loaders.mastodon import MastodonTootsLoader
from langchain.document_loaders.max_compute import MaxComputeLoader
@@ -196,6 +197,7 @@
"IuguLoader",
"JSONLoader",
"JoplinLoader",
"LarkSuiteDocLoader",
"MWDumpLoader",
"MastodonTootsLoader",
"MathpixPDFLoader",
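With the import and `__all__` entry above, the new loader is also importable from the package root:

```python
# Top-level import exposed by the __init__.py change above
from langchain.document_loaders import LarkSuiteDocLoader
```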
46 changes: 46 additions & 0 deletions langchain/document_loaders/larksuite.py
@@ -0,0 +1,46 @@
"""Loader that loads LarkSuite (FeiShu) document json dump."""
import json
import urllib.request
from typing import Any, Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class LarkSuiteDocLoader(BaseLoader):
"""Loader that loads LarkSuite (FeiShu) document."""

def __init__(self, domain: str, access_token: str, document_id: str):
"""Initialize with domain, access_token (tenant / user), and document_id."""
self.domain = domain
self.access_token = access_token
self.document_id = document_id

def _get_larksuite_api_json_data(self, api_url: str) -> Any:
"""Get LarkSuite (FeiShu) API response json data."""
headers = {"Authorization": f"Bearer {self.access_token}"}
request = urllib.request.Request(api_url, headers=headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
return json_data

def lazy_load(self) -> Iterator[Document]:
"""Lazy load LarkSuite (FeiShu) document."""
api_url_prefix = f"{self.domain}/open-apis/docx/v1/documents"
metadata_json = self._get_larksuite_api_json_data(
f"{api_url_prefix}/{self.document_id}"
)
raw_content_json = self._get_larksuite_api_json_data(
f"{api_url_prefix}/{self.document_id}/raw_content"
)
text = raw_content_json["data"]["content"]
metadata = {
"document_id": self.document_id,
"revision_id": metadata_json["data"]["document"]["revision_id"],
"title": metadata_json["data"]["document"]["title"],
}
yield Document(page_content=text, metadata=metadata)

def load(self) -> List[Document]:
"""Load LarkSuite (FeiShu) document."""
return list(self.lazy_load())
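A minimal usage sketch of the loader defined above; the domain, token, and document id are placeholders, and the text splitter is only one possible downstream consumer, not something this PR requires.

```python
# Usage sketch with placeholder credentials; lazy_load() streams Documents,
# load() collects them into a list. Splitting is optional and shown only as
# an example of downstream processing.
from langchain.document_loaders.larksuite import LarkSuiteDocLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

loader = LarkSuiteDocLoader(
    domain="https://open.larksuite.com",
    access_token="<tenant_access_token or user_access_token>",
    document_id="<document id>",
)

splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
for doc in loader.lazy_load():
    chunks = splitter.split_documents([doc])
    print(doc.metadata["title"], len(chunks))
```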
14 changes: 14 additions & 0 deletions tests/integration_tests/document_loaders/test_larksuite.py
@@ -0,0 +1,14 @@
from langchain.document_loaders.larksuite import LarkSuiteDocLoader

DOMAIN = ""
ACCESS_TOKEN = ""
DOCUMENT_ID = ""


def test_larksuite_doc_loader() -> None:
"""Test LarkSuite (FeiShu) document loader."""
loader = LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID)
docs = loader.load()

assert len(docs) == 1
assert docs[0].page_content is not None
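The integration test above needs real LarkSuite credentials. As a suggestion only (not part of this PR), an offline unit test could patch the loader's internal API helper with canned responses; the fake payloads below mirror the fields that `lazy_load()` reads and are otherwise invented.

```python
# Sketch of an offline unit test (not included in this PR). It patches
# _get_larksuite_api_json_data so no network access or credentials are needed.
from unittest.mock import patch

from langchain.document_loaders.larksuite import LarkSuiteDocLoader

# lazy_load() makes two API calls: document metadata first, then raw content.
FAKE_RESPONSES = [
    {"data": {"document": {"revision_id": 1, "title": "Test Doc"}}},
    {"data": {"content": "Test Doc\nThis is a Test Doc\n"}},
]


def test_larksuite_doc_loader_offline() -> None:
    loader = LarkSuiteDocLoader("https://example.invalid", "fake-token", "fake-doc-id")
    with patch.object(
        LarkSuiteDocLoader, "_get_larksuite_api_json_data", side_effect=FAKE_RESPONSES
    ):
        docs = loader.load()

    assert len(docs) == 1
    assert docs[0].page_content == "Test Doc\nThis is a Test Doc\n"
    assert docs[0].metadata == {
        "document_id": "fake-doc-id",
        "revision_id": 1,
        "title": "Test Doc",
    }
```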