8 changes: 4 additions & 4 deletions engine/query_executor.py
@@ -6,13 +6,13 @@
"""

import logging
from typing import Dict, Any, Optional, Union
from typing import Any, Dict, Optional, Union

from langchain_core.messages import HumanMessage

from llm_utils.graph_utils.enriched_graph import builder as enriched_builder
from llm_utils.graph_utils.basic_graph import builder as basic_builder
from llm_utils.llm_response_parser import LLMResponseParser
from utils.llm.graph_utils.basic_graph import builder as basic_builder
from utils.llm.graph_utils.enriched_graph import builder as enriched_builder
from utils.llm.llm_response_parser import LLMResponseParser

logger = logging.getLogger(__name__)

2 changes: 1 addition & 1 deletion interface/app_pages/graph_builder.py
@@ -13,7 +13,7 @@
import streamlit as st
from langgraph.graph import END, StateGraph

from llm_utils.graph_utils.base import (
from utils.llm.graph_utils.base import (
CONTEXT_ENRICHMENT,
GET_TABLE_INFO,
PROFILE_EXTRACTION,
9 changes: 4 additions & 5 deletions interface/core/config/settings.py
@@ -3,19 +3,18 @@
"""

import os
from typing import Any, Dict, Optional
from pathlib import Path
from typing import Any, Dict, Optional

try:
import streamlit as st # type: ignore
except Exception: # pragma: no cover - streamlit may not be present in non-UI contexts
st = None # type: ignore

from llm_utils.tools import set_gms_server
from utils.llm.tools import set_gms_server

from .models import Config


DEFAULT_DATAHUB_SERVER = "http://localhost:8080"
DEFAULT_VECTORDB_TYPE = os.getenv("VECTORDB_TYPE", "faiss").lower()
DEFAULT_VECTORDB_LOCATION = os.getenv("VECTORDB_LOCATION", "")
@@ -202,7 +201,7 @@ def update_llm_settings(*, provider: str, values: dict[str, str | None]) -> None
"""Update chat LLM settings from UI into process env and session.

This function mirrors the environment-variable based configuration consumed by
llm_utils.llm.factory.get_llm(). Only sets provided keys; missing values are left as-is.
utils.llm.core.factory.get_llm(). Only sets provided keys; missing values are left as-is.
"""
provider_norm = (provider or "").lower()
if provider_norm not in {
@@ -229,7 +228,7 @@ def update_embedding_settings(*, provider: str, values: dict[str, str | None]) -> None
def update_embedding_settings(*, provider: str, values: dict[str, str | None]) -> None:
"""Update Embeddings settings from UI into process env and session.

Mirrors env vars consumed by llm_utils.llm.factory.get_embeddings().
Mirrors env vars consumed by utils.llm.core.factory.get_embeddings().
"""
provider_norm = (provider or "").lower()
if provider_norm not in {
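Note: for reviewers, a minimal usage sketch of update_llm_settings as changed above. The provider string and the dictionary keys are assumptions standing in for whatever names the validation set and utils.llm.core.factory.get_llm() actually read; per the docstring, missing/None values are left as-is.

from interface.core.config.settings import update_llm_settings

update_llm_settings(
    provider="openai",  # assumed to be in the accepted provider set
    values={
        "OPENAI_API_KEY": "sk-...",   # hypothetical env-var name
        "OPENAI_MODEL": "gpt-4o",     # hypothetical env-var name
        "OPENAI_BASE_URL": None,      # left untouched: only provided keys are set
    },
)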
2 changes: 1 addition & 1 deletion interface/core/result_renderer.py
@@ -11,8 +11,8 @@
from langchain_core.messages import AIMessage

from infra.observability.token_usage import TokenUtils
from llm_utils.llm_response_parser import LLMResponseParser
from utils.databases import DatabaseFactory
from utils.llm.llm_response_parser import LLMResponseParser
from viz.display_chart import DisplayChart


4 changes: 2 additions & 2 deletions interface/core/session_utils.py
@@ -24,9 +24,9 @@ def init_graph(use_enriched: bool) -> str:
"""

builder_module = (
"llm_utils.graph_utils.enriched_graph"
"utils.llm.graph_utils.enriched_graph"
if use_enriched
else "llm_utils.graph_utils.basic_graph"
else "utils.llm.graph_utils.basic_graph"
)

builder = __import__(builder_module, fromlist=["builder"]).builder
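Note: the dynamic import in init_graph above is equivalent to this importlib-based sketch (illustrative only; the repository keeps the __import__ form):

import importlib

def load_builder(use_enriched: bool):
    """Return the LangGraph builder for the selected workflow."""
    module_name = (
        "utils.llm.graph_utils.enriched_graph"
        if use_enriched
        else "utils.llm.graph_utils.basic_graph"
    )
    return importlib.import_module(module_name).builder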
Empty file removed llm_utils/__init__.py
5 changes: 0 additions & 5 deletions llm_utils/output_parser/__init__.py

This file was deleted.

1 change: 0 additions & 1 deletion pyproject.toml
@@ -68,7 +68,6 @@ include = [
packages = [
"cli",
"interface",
"llm_utils",
"engine",
"infra",
"viz",
2 changes: 1 addition & 1 deletion test/test_llm_utils/test_llm_response_parser.py
@@ -9,7 +9,7 @@

import unittest

from llm_utils.llm_response_parser import LLMResponseParser
from utils.llm.llm_response_parser import LLMResponseParser


class TestLLMResponseParser(unittest.TestCase):
2 changes: 1 addition & 1 deletion llm_utils/README.md → utils/llm/README.md
@@ -1,4 +1,4 @@
## llm_utils Overview
## utils.llm Overview

A package that gathers the auxiliary utilities used in the Lang2SQL pipeline, such as the LLM, retrieval (RAG), graph workflows, DB execution, and visualization. This document organizes the features and the integration flow by depth (layer).

16 changes: 4 additions & 12 deletions llm_utils/chains.py → utils/llm/chains.py
@@ -8,21 +8,13 @@
- Question Gate (SQL suitability classification)
"""

import os
from langchain_core.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from pydantic import BaseModel, Field
from llm_utils.output_parser.question_suitability import QuestionSuitability
from llm_utils.output_parser.document_suitability import (
DocumentSuitabilityList,
)

from llm_utils.llm import get_llm

from prompt.template_loader import get_prompt_template

from utils.llm.core import get_llm
from utils.llm.output_parser.document_suitability import DocumentSuitabilityList
from utils.llm.output_parser.question_suitability import QuestionSuitability

llm = get_llm()

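Note: a hedged sketch of how a gate chain such as question_gate_chain could be assembled from the pieces imported above. The template key and the exact wiring are assumptions, not the repository's code, and with_structured_output assumes the factory returned a chat model that supports it.

from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from prompt.template_loader import get_prompt_template
from utils.llm.core import get_llm
from utils.llm.output_parser.question_suitability import QuestionSuitability

llm = get_llm()
prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessagePromptTemplate.from_template(get_prompt_template("question_gate")),  # hypothetical template key
        ("human", "{question}"),
    ]
)
question_gate_chain_sketch = prompt | llm.with_structured_output(QuestionSuitability)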
20 changes: 10 additions & 10 deletions llm_utils/llm/__init__.py → utils/llm/core/__init__.py
@@ -1,18 +1,18 @@
from .factory import (
get_llm,
get_llm_openai,
get_llm_azure,
get_llm_bedrock,
get_llm_gemini,
get_llm_ollama,
get_llm_huggingface,
from utils.llm.core.factory import (
get_embeddings,
get_embeddings_openai,
get_embeddings_azure,
get_embeddings_bedrock,
get_embeddings_gemini,
get_embeddings_ollama,
get_embeddings_huggingface,
get_embeddings_ollama,
get_embeddings_openai,
get_llm,
get_llm_azure,
get_llm_bedrock,
get_llm_gemini,
get_llm_huggingface,
get_llm_ollama,
get_llm_openai,
)

__all__ = [
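Note: after this move, downstream code pulls the factory helpers from the new package path. A minimal sketch (the no-argument calls mirror the llm = get_llm() usage in chains.py above; provider selection is environment-driven per the settings.py docstring):

from utils.llm.core import get_embeddings, get_llm

llm = get_llm()                # provider/model resolved from environment variables
embeddings = get_embeddings()  # same environment-driven selection for embeddings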
4 changes: 2 additions & 2 deletions llm_utils/llm/factory.py → utils/llm/core/factory.py
@@ -2,7 +2,7 @@
from typing import Optional

from langchain.llms.base import BaseLanguageModel
from langchain_aws import ChatBedrockConverse, BedrockEmbeddings
from langchain_aws import BedrockEmbeddings, ChatBedrockConverse
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
from langchain_huggingface import (
ChatHuggingFace,
@@ -11,9 +11,9 @@
)
from langchain_ollama import ChatOllama, OllamaEmbeddings
from langchain_openai import (
AzureChatOpenAI,
AzureOpenAIEmbeddings,
ChatOpenAI,
AzureChatOpenAI,
OpenAIEmbeddings,
)

@@ -4,16 +4,16 @@
This package contains the modules related to building Lang2SQL's workflow graphs.
"""

from .base import (
QueryMakerState,
from utils.llm.graph_utils.base import (
CONTEXT_ENRICHMENT,
GET_TABLE_INFO,
QUERY_MAKER,
PROFILE_EXTRACTION,
CONTEXT_ENRICHMENT,
QUERY_MAKER,
QueryMakerState,
context_enrichment_node,
get_table_info_node,
query_maker_node,
profile_extraction_node,
context_enrichment_node,
query_maker_node,
)

from .basic_graph import builder as basic_builder
12 changes: 5 additions & 7 deletions llm_utils/graph_utils/base.py → utils/llm/graph_utils/base.py
@@ -1,18 +1,16 @@
import json

from typing_extensions import TypedDict, Annotated
from langgraph.graph.message import add_messages
from typing_extensions import Annotated, TypedDict


from llm_utils.chains import (
query_maker_chain,
from utils.llm.chains import (
document_suitability_chain,
profile_extraction_chain,
query_enrichment_chain,
query_maker_chain,
question_gate_chain,
document_suitability_chain,
)

from llm_utils.retrieval import search_tables
from utils.llm.retrieval import search_tables

# Node identifier definitions
QUESTION_GATE = "question_gate"
@@ -1,23 +1,22 @@
import json
"""
StateGraph configuration for the basic workflow.
Execution proceeds in the order GET_TABLE_INFO -> QUERY_MAKER.
"""

from langgraph.graph import StateGraph, END
from llm_utils.graph_utils.base import (
QueryMakerState,
QUESTION_GATE,
GET_TABLE_INFO,
from langgraph.graph import END, StateGraph

from utils.llm.graph_utils.base import (
EVALUATE_DOCUMENT_SUITABILITY,
GET_TABLE_INFO,
QUERY_MAKER,
question_gate_node,
get_table_info_node,
QUESTION_GATE,
QueryMakerState,
document_suitability_node,
get_table_info_node,
query_maker_node,
question_gate_node,
)

"""
StateGraph configuration for the basic workflow.
Execution proceeds in the order GET_TABLE_INFO -> QUERY_MAKER.
"""

# Create and configure the StateGraph
builder = StateGraph(QueryMakerState)
builder.set_entry_point(QUESTION_GATE)
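Note: a hedged sketch of compiling and invoking this builder. The "messages" state key is an assumption based on the add_messages annotation imported in base.py, and other QueryMakerState fields may be required in practice.

from langchain_core.messages import HumanMessage
from utils.llm.graph_utils.basic_graph import builder

graph = builder.compile()
result = graph.invoke(
    {"messages": [HumanMessage(content="How many users signed up last week?")]}
)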
@@ -1,27 +1,26 @@
import json
"""
An extended graph that adds 'profile extraction (PROFILE_EXTRACTION)' and 'context enrichment (CONTEXT_ENRICHMENT)'
to the basic workflow.
"""

from langgraph.graph import StateGraph, END
from llm_utils.graph_utils.base import (
QueryMakerState,
QUESTION_GATE,
GET_TABLE_INFO,
from langgraph.graph import END, StateGraph

from utils.llm.graph_utils.base import (
CONTEXT_ENRICHMENT,
EVALUATE_DOCUMENT_SUITABILITY,
GET_TABLE_INFO,
PROFILE_EXTRACTION,
CONTEXT_ENRICHMENT,
QUERY_MAKER,
question_gate_node,
get_table_info_node,
QUESTION_GATE,
QueryMakerState,
context_enrichment_node,
document_suitability_node,
get_table_info_node,
profile_extraction_node,
context_enrichment_node,
query_maker_node,
question_gate_node,
)

"""
An extended graph that adds 'profile extraction (PROFILE_EXTRACTION)' and 'context enrichment (CONTEXT_ENRICHMENT)'
to the basic workflow.
"""

# Create and configure the StateGraph
builder = StateGraph(QueryMakerState)
builder.set_entry_point(QUESTION_GATE)
File renamed without changes.
@@ -5,7 +5,8 @@
At the top level, this is a Root model holding a mapping of table name (string) -> evaluation object.
"""

from typing import Dict, List
from typing import List

from pydantic import BaseModel, Field


5 changes: 2 additions & 3 deletions llm_utils/retrieval.py → utils/llm/retrieval.py
@@ -1,12 +1,11 @@
import os
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import CrossEncoderReranker
from langchain_community.cross_encoders import HuggingFaceCrossEncoder
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from llm_utils.vectordb import get_vector_db
from utils.llm.vectordb import get_vector_db


def load_reranker_model(device: str = "cpu"):
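Note: one plausible shape for the reranker that load_reranker_model builds, inferred only from the imports visible above; the model id and top_n are placeholder assumptions, not the repository's values.

from langchain.retrievers.document_compressors import CrossEncoderReranker
from langchain_community.cross_encoders import HuggingFaceCrossEncoder

def load_reranker_model_sketch(device: str = "cpu"):
    # Wrap a HuggingFace cross-encoder in LangChain's reranking compressor.
    cross_encoder = HuggingFaceCrossEncoder(
        model_name="BAAI/bge-reranker-base",   # placeholder model id
        model_kwargs={"device": device},
    )
    return CrossEncoderReranker(model=cross_encoder, top_n=3)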
4 changes: 2 additions & 2 deletions llm_utils/tools/__init__.py → utils/llm/tools/__init__.py
@@ -1,7 +1,7 @@
from .datahub import (
set_gms_server,
from utils.llm.tools.datahub import (
get_info_from_db,
get_metadata_from_db,
set_gms_server,
)

__all__ = [
File renamed without changes.
@@ -2,6 +2,6 @@
VectorDB module - a vector database abstraction supporting FAISS and pgvector
"""

from .factory import get_vector_db
from utils.llm.vectordb.factory import get_vector_db

__all__ = ["get_vector_db"]
@@ -5,8 +5,8 @@
import os
from typing import Optional

from llm_utils.vectordb.faiss_db import get_faiss_vector_db
from llm_utils.vectordb.pgvector_db import get_pgvector_db
from utils.llm.vectordb.faiss_db import get_faiss_vector_db
from utils.llm.vectordb.pgvector_db import get_pgvector_db


def get_vector_db(
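Note: a hedged sketch of the dispatch a factory like get_vector_db typically performs. Only the two imports and the "faiss" default for VECTORDB_TYPE (seen in settings.py above) are visible in this diff, so the parameter names and the get_pgvector_db call shape are assumptions.

import os
from typing import Optional

from utils.llm.vectordb.faiss_db import get_faiss_vector_db
from utils.llm.vectordb.pgvector_db import get_pgvector_db

def get_vector_db_sketch(vectordb_type: Optional[str] = None, vectordb_location: Optional[str] = None):
    # Default to FAISS unless pgvector is explicitly requested via argument or env var.
    vectordb_type = (vectordb_type or os.getenv("VECTORDB_TYPE", "faiss")).lower()
    if vectordb_type == "pgvector":
        return get_pgvector_db()                    # call shape assumed
    return get_faiss_vector_db(vectordb_location)   # matches the visible get_faiss_vector_db signature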
@@ -7,8 +7,8 @@

from langchain_community.vectorstores import FAISS

from llm_utils.llm import get_embeddings
from llm_utils.tools import get_info_from_db
from utils.llm.core import get_embeddings
from utils.llm.tools import get_info_from_db


def get_faiss_vector_db(vectordb_path: Optional[str] = None):
@@ -4,12 +4,12 @@

import os
from typing import Optional

import psycopg2
from sqlalchemy.orm import Session
from langchain_postgres.vectorstores import PGVector

from llm_utils.tools import get_info_from_db
from llm_utils.llm import get_embeddings
from utils.llm.core import get_embeddings
from utils.llm.tools import get_info_from_db


def _check_collection_exists(connection_string: str, collection_name: str) -> bool: