From 77619b50b9dd5e1e8e002d1198c239f829317472 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Fri, 25 Jul 2025 18:32:57 +0530 Subject: [PATCH 01/29] added migration --- ...d35eff62c_add_openai_conversation_table.py | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 backend/app/alembic/versions/e9dd35eff62c_add_openai_conversation_table.py diff --git a/backend/app/alembic/versions/e9dd35eff62c_add_openai_conversation_table.py b/backend/app/alembic/versions/e9dd35eff62c_add_openai_conversation_table.py new file mode 100644 index 00000000..abbaf7b6 --- /dev/null +++ b/backend/app/alembic/versions/e9dd35eff62c_add_openai_conversation_table.py @@ -0,0 +1,87 @@ +"""add_openai_conversation_table + +Revision ID: e9dd35eff62c +Revises: e8ee93526b37 +Create Date: 2025-07-25 18:26:38.132146 + +""" +from alembic import op +import sqlalchemy as sa +import sqlmodel.sql.sqltypes + + +# revision identifiers, used by Alembic. +revision = "e9dd35eff62c" +down_revision = "e8ee93526b37" +branch_labels = None +depends_on = None + + +def upgrade(): + op.create_table( + "openai_conversation", + sa.Column("id", sa.Integer(), nullable=False), + sa.Column("response_id", sqlmodel.sql.sqltypes.AutoString(), nullable=False), + sa.Column( + "ancestor_response_id", sqlmodel.sql.sqltypes.AutoString(), nullable=True + ), + sa.Column( + "previous_response_id", sqlmodel.sql.sqltypes.AutoString(), nullable=True + ), + sa.Column("user_question", sqlmodel.sql.sqltypes.AutoString(), nullable=False), + sa.Column("response", sqlmodel.sql.sqltypes.AutoString(), nullable=True), + sa.Column("model", sqlmodel.sql.sqltypes.AutoString(), nullable=False), + sa.Column("assistant_id", sqlmodel.sql.sqltypes.AutoString(), nullable=False), + sa.Column("project_id", sa.Integer(), nullable=False), + sa.Column("organization_id", sa.Integer(), nullable=False), + sa.Column("is_deleted", sa.Boolean(), nullable=False), + sa.Column("inserted_at", sa.DateTime(), nullable=False), + 
sa.Column("updated_at", sa.DateTime(), nullable=False),
+        sa.Column("deleted_at", sa.DateTime(), nullable=True),
+        sa.PrimaryKeyConstraint("id"),
+        sa.ForeignKeyConstraint(
+            ["organization_id"], ["organization.id"], ondelete="CASCADE"
+        ),
+        sa.ForeignKeyConstraint(["project_id"], ["project.id"], ondelete="CASCADE"),
+    )
+    op.create_index(
+        op.f("ix_openai_conversation_ancestor_response_id"),
+        "openai_conversation",
+        ["ancestor_response_id"],
+        unique=False,
+    )
+    op.create_index(
+        op.f("ix_openai_conversation_previous_response_id"),
+        "openai_conversation",
+        ["previous_response_id"],
+        unique=False,
+    )
+    op.create_index(
+        op.f("ix_openai_conversation_response_id"),
+        "openai_conversation",
+        ["response_id"],
+        unique=False,
+    )
+    op.create_foreign_key(
+        "fk_openai_conversation_project", "openai_conversation", "project", ["project_id"], ["id"]
+    )
+    op.create_foreign_key(
+        "fk_openai_conversation_organization", "openai_conversation", "organization", ["organization_id"], ["id"]
+    )
+
+
+def downgrade():
+    op.drop_constraint("fk_openai_conversation_organization", "openai_conversation", type_="foreignkey")
+    op.drop_constraint("fk_openai_conversation_project", "openai_conversation", type_="foreignkey")
+    op.drop_index(
+        op.f("ix_openai_conversation_response_id"), table_name="openai_conversation"
+    )
+    op.drop_index(
+        op.f("ix_openai_conversation_previous_response_id"),
+        table_name="openai_conversation",
+    )
+    op.drop_index(
+        op.f("ix_openai_conversation_ancestor_response_id"),
+        table_name="openai_conversation",
+    )
+    op.drop_table("openai_conversation")

From aeebc1f6a849f46d40df718171260b506e555591 Mon Sep 17 00:00:00 2001
From: Akhilesh Negi
Date: Sat, 26 Jul 2025 12:01:00 +0530
Subject: [PATCH 02/29] first stab at CRUD

---
 backend/app/api/main.py                       |   2 +
 backend/app/api/routes/openai_conversation.py | 228 ++++++++++
 backend/app/crud/__init__.py                  |  12 +
 backend/app/crud/openai_conversation.py       | 250 +++++++++++
 backend/app/models/__init__.py                |   7 +
 backend/app/models/openai_conversation.py     |  78 ++++
 backend/app/models/organization.py            |   4 +
 backend/app/models/project.py                 |   3 +
.../api/routes/test_openai_conversation.py | 403 +++++++++++++++++ .../tests/crud/test_openai_conversation.py | 406 ++++++++++++++++++ backend/app/tests/utils/conversation.py | 80 ++++ backend/app/tests/utils/utils.py | 26 +- 12 files changed, 1498 insertions(+), 1 deletion(-) create mode 100644 backend/app/api/routes/openai_conversation.py create mode 100644 backend/app/crud/openai_conversation.py create mode 100644 backend/app/models/openai_conversation.py create mode 100644 backend/app/tests/api/routes/test_openai_conversation.py create mode 100644 backend/app/tests/crud/test_openai_conversation.py create mode 100644 backend/app/tests/utils/conversation.py diff --git a/backend/app/api/main.py b/backend/app/api/main.py index 7db3c3d5..df0b1016 100644 --- a/backend/app/api/main.py +++ b/backend/app/api/main.py @@ -7,6 +7,7 @@ documents, login, organization, + openai_conversation, project, project_user, responses, @@ -27,6 +28,7 @@ api_router.include_router(documents.router) api_router.include_router(login.router) api_router.include_router(onboarding.router) +api_router.include_router(openai_conversation.router) api_router.include_router(organization.router) api_router.include_router(project.router) api_router.include_router(project_user.router) diff --git a/backend/app/api/routes/openai_conversation.py b/backend/app/api/routes/openai_conversation.py new file mode 100644 index 00000000..ee553d79 --- /dev/null +++ b/backend/app/api/routes/openai_conversation.py @@ -0,0 +1,228 @@ +from typing import Annotated + +from fastapi import APIRouter, Depends, Path, HTTPException, Query +from sqlmodel import Session + +from app.api.deps import get_db, get_current_user_org_project +from app.crud import ( + get_conversation_by_id, + get_conversation_by_response_id, + get_conversations_by_project, + get_conversations_by_assistant, + get_conversation_thread, + create_conversation, + update_conversation, + delete_conversation, + upsert_conversation, +) +from app.models import ( + 
UserProjectOrg, + OpenAIConversationCreate, + OpenAIConversationUpdate, + OpenAIConversation, +) +from app.utils import APIResponse + +router = APIRouter(prefix="/openai-conversation", tags=["OpenAI Conversations"]) + + +@router.post("/", response_model=APIResponse[OpenAIConversation], status_code=201) +def create_conversation_route( + conversation_in: OpenAIConversationCreate, + session: Session = Depends(get_db), + current_user: UserProjectOrg = Depends(get_current_user_org_project), +): + """ + Create a new OpenAI conversation in the database. + """ + conversation = create_conversation( + session=session, + conversation=conversation_in, + project_id=current_user.project_id, + organization_id=current_user.organization_id, + ) + return APIResponse.success_response(conversation) + + +@router.post("/upsert", response_model=APIResponse[OpenAIConversation], status_code=201) +def upsert_conversation_route( + conversation_in: OpenAIConversationCreate, + session: Session = Depends(get_db), + current_user: UserProjectOrg = Depends(get_current_user_org_project), +): + """ + Create a new conversation or update existing one if response_id already exists. + """ + conversation = upsert_conversation( + session=session, + conversation=conversation_in, + project_id=current_user.project_id, + organization_id=current_user.organization_id, + ) + return APIResponse.success_response(conversation) + + +@router.patch("/{conversation_id}", response_model=APIResponse[OpenAIConversation]) +def update_conversation_route( + conversation_id: Annotated[int, Path(description="Conversation ID to update")], + conversation_update: OpenAIConversationUpdate, + session: Session = Depends(get_db), + current_user: UserProjectOrg = Depends(get_current_user_org_project), +): + """ + Update an existing conversation with provided fields. 
+ """ + updated_conversation = update_conversation( + session=session, + conversation_id=conversation_id, + project_id=current_user.project_id, + conversation_update=conversation_update, + ) + + if not updated_conversation: + raise HTTPException( + status_code=404, detail=f"Conversation with ID {conversation_id} not found." + ) + + return APIResponse.success_response(updated_conversation) + + +@router.get( + "/{conversation_id}", + response_model=APIResponse[OpenAIConversation], + summary="Get a single conversation by its ID", +) +def get_conversation_route( + conversation_id: int = Path(..., description="The conversation ID to fetch"), + session: Session = Depends(get_db), + current_user: UserProjectOrg = Depends(get_current_user_org_project), +): + """ + Fetch a single conversation by its ID. + """ + conversation = get_conversation_by_id( + session, conversation_id, current_user.project_id + ) + if not conversation: + raise HTTPException( + status_code=404, detail=f"Conversation with ID {conversation_id} not found." + ) + return APIResponse.success_response(conversation) + + +@router.get( + "/response/{response_id}", + response_model=APIResponse[OpenAIConversation], + summary="Get a conversation by its OpenAI response ID", +) +def get_conversation_by_response_id_route( + response_id: str = Path(..., description="The OpenAI response ID to fetch"), + session: Session = Depends(get_db), + current_user: UserProjectOrg = Depends(get_current_user_org_project), +): + """ + Fetch a conversation by its OpenAI response ID. 
+ """ + conversation = get_conversation_by_response_id( + session, response_id, current_user.project_id + ) + if not conversation: + raise HTTPException( + status_code=404, + detail=f"Conversation with response ID {response_id} not found.", + ) + return APIResponse.success_response(conversation) + + +@router.get( + "/thread/{response_id}", + response_model=APIResponse[list[OpenAIConversation]], + summary="Get the full conversation thread starting from a response ID", +) +def get_conversation_thread_route( + response_id: str = Path( + ..., description="The response ID to start the thread from" + ), + session: Session = Depends(get_db), + current_user: UserProjectOrg = Depends(get_current_user_org_project), +): + """ + Get the full conversation thread starting from a given response ID. + This includes all ancestor and previous responses in the conversation chain. + """ + thread_conversations = get_conversation_thread( + session=session, + response_id=response_id, + project_id=current_user.project_id, + ) + return APIResponse.success_response(thread_conversations) + + +@router.get( + "/", + response_model=APIResponse[list[OpenAIConversation]], + summary="List all conversations in the current project", +) +def list_conversations_route( + session: Session = Depends(get_db), + current_user: UserProjectOrg = Depends(get_current_user_org_project), + skip: int = Query(0, ge=0, description="How many items to skip"), + limit: int = Query(100, ge=1, le=100, description="Maximum items to return"), +): + """ + List all conversations in the current project. 
+ """ + conversations = get_conversations_by_project( + session=session, project_id=current_user.project_id, skip=skip, limit=limit + ) + return APIResponse.success_response(conversations) + + +@router.get( + "/assistant/{assistant_id}", + response_model=APIResponse[list[OpenAIConversation]], + summary="List all conversations for a specific assistant", +) +def list_conversations_by_assistant_route( + assistant_id: str = Path(..., description="The assistant ID to filter by"), + session: Session = Depends(get_db), + current_user: UserProjectOrg = Depends(get_current_user_org_project), + skip: int = Query(0, ge=0, description="How many items to skip"), + limit: int = Query(100, ge=1, le=100, description="Maximum items to return"), +): + """ + List all conversations for a specific assistant in the current project. + """ + conversations = get_conversations_by_assistant( + session=session, + assistant_id=assistant_id, + project_id=current_user.project_id, + skip=skip, + limit=limit, + ) + return APIResponse.success_response(conversations) + + +@router.delete("/{conversation_id}", response_model=APIResponse) +def delete_conversation_route( + conversation_id: Annotated[int, Path(description="Conversation ID to delete")], + session: Session = Depends(get_db), + current_user: UserProjectOrg = Depends(get_current_user_org_project), +): + """ + Soft delete a conversation by marking it as deleted. + """ + deleted_conversation = delete_conversation( + session=session, + conversation_id=conversation_id, + project_id=current_user.project_id, + ) + + if not deleted_conversation: + raise HTTPException( + status_code=404, detail=f"Conversation with ID {conversation_id} not found." 
+ ) + + return APIResponse.success_response( + data={"message": "Conversation deleted successfully."} + ) diff --git a/backend/app/crud/__init__.py b/backend/app/crud/__init__.py index 49b09f56..a2cd5f3c 100644 --- a/backend/app/crud/__init__.py +++ b/backend/app/crud/__init__.py @@ -54,3 +54,15 @@ get_assistants_by_project, delete_assistant, ) + +from .openai_conversation import ( + get_conversation_by_id, + get_conversation_by_response_id, + get_conversations_by_project, + get_conversations_by_assistant, + get_conversation_thread, + create_conversation, + update_conversation, + delete_conversation, + upsert_conversation, +) diff --git a/backend/app/crud/openai_conversation.py b/backend/app/crud/openai_conversation.py new file mode 100644 index 00000000..e59ef057 --- /dev/null +++ b/backend/app/crud/openai_conversation.py @@ -0,0 +1,250 @@ +import logging +from typing import Optional, List + +from sqlmodel import Session, and_, select + +from app.models import ( + OpenAIConversation, + OpenAIConversationCreate, + OpenAIConversationUpdate, +) +from app.core.util import now + +logger = logging.getLogger(__name__) + + +def get_conversation_by_id( + session: Session, conversation_id: int, project_id: int +) -> Optional[OpenAIConversation]: + """Get a conversation by its ID and project ID.""" + statement = select(OpenAIConversation).where( + and_( + OpenAIConversation.id == conversation_id, + OpenAIConversation.project_id == project_id, + OpenAIConversation.is_deleted == False, + ) + ) + return session.exec(statement).first() + + +def get_conversation_by_response_id( + session: Session, response_id: str, project_id: int +) -> Optional[OpenAIConversation]: + """Get a conversation by its OpenAI response ID and project ID.""" + statement = select(OpenAIConversation).where( + and_( + OpenAIConversation.response_id == response_id, + OpenAIConversation.project_id == project_id, + OpenAIConversation.is_deleted == False, + ) + ) + return session.exec(statement).first() + + 
+def get_conversations_by_project( + session: Session, + project_id: int, + skip: int = 0, + limit: int = 100, +) -> List[OpenAIConversation]: + """ + Return all conversations for a given project, with optional pagination. + """ + statement = ( + select(OpenAIConversation) + .where( + OpenAIConversation.project_id == project_id, + OpenAIConversation.is_deleted == False, + ) + .order_by(OpenAIConversation.inserted_at.desc()) + .offset(skip) + .limit(limit) + ) + results = session.exec(statement).all() + return results + + +def get_conversations_by_assistant( + session: Session, + assistant_id: str, + project_id: int, + skip: int = 0, + limit: int = 100, +) -> List[OpenAIConversation]: + """ + Return all conversations for a given assistant and project, with optional pagination. + """ + statement = ( + select(OpenAIConversation) + .where( + OpenAIConversation.assistant_id == assistant_id, + OpenAIConversation.project_id == project_id, + OpenAIConversation.is_deleted == False, + ) + .order_by(OpenAIConversation.inserted_at.desc()) + .offset(skip) + .limit(limit) + ) + results = session.exec(statement).all() + return results + + +def get_conversation_thread( + session: Session, + response_id: str, + project_id: int, +) -> List[OpenAIConversation]: + """ + Get the full conversation thread starting from a given response ID. + This includes all ancestor and previous responses in the conversation chain. 
+ """ + # First, find the root of the conversation thread + root_response_id = response_id + current_conversation = get_conversation_by_response_id( + session, response_id, project_id + ) + + if not current_conversation: + return [] + + # Find the root of the conversation thread + while current_conversation.ancestor_response_id: + root_conversation = get_conversation_by_response_id( + session, current_conversation.ancestor_response_id, project_id + ) + if not root_conversation: + break + root_response_id = current_conversation.ancestor_response_id + current_conversation = root_conversation + + # Now get all conversations in the thread + thread_conversations = [] + current_response_id = root_response_id + + while current_response_id: + conversation = get_conversation_by_response_id( + session, current_response_id, project_id + ) + if not conversation: + break + thread_conversations.append(conversation) + current_response_id = conversation.previous_response_id + + return thread_conversations + + +def create_conversation( + session: Session, + conversation: OpenAIConversationCreate, + project_id: int, + organization_id: int, +) -> OpenAIConversation: + """ + Create a new conversation in the database. + """ + db_conversation = OpenAIConversation( + **conversation.model_dump(), + project_id=project_id, + organization_id=organization_id, + ) + session.add(db_conversation) + session.commit() + session.refresh(db_conversation) + + logger.info( + f"Created conversation with response_id={db_conversation.response_id}, " + f"assistant_id={db_conversation.assistant_id}, project_id={project_id}" + ) + + return db_conversation + + +def update_conversation( + session: Session, + conversation_id: int, + project_id: int, + conversation_update: OpenAIConversationUpdate, +) -> Optional[OpenAIConversation]: + """ + Update an existing conversation. 
+ """ + db_conversation = get_conversation_by_id(session, conversation_id, project_id) + if not db_conversation: + return None + + update_data = conversation_update.model_dump(exclude_unset=True) + for field, value in update_data.items(): + setattr(db_conversation, field, value) + + db_conversation.updated_at = now() + session.add(db_conversation) + session.commit() + session.refresh(db_conversation) + + logger.info( + f"Updated conversation with id={conversation_id}, " + f"response_id={db_conversation.response_id}, project_id={project_id}" + ) + + return db_conversation + + +def delete_conversation( + session: Session, + conversation_id: int, + project_id: int, +) -> Optional[OpenAIConversation]: + """ + Soft delete a conversation by marking it as deleted. + """ + db_conversation = get_conversation_by_id(session, conversation_id, project_id) + if not db_conversation: + return None + + db_conversation.is_deleted = True + db_conversation.deleted_at = now() + session.add(db_conversation) + session.commit() + session.refresh(db_conversation) + + logger.info( + f"Deleted conversation with id={conversation_id}, " + f"response_id={db_conversation.response_id}, project_id={project_id}" + ) + + return db_conversation + + +def upsert_conversation( + session: Session, + conversation: OpenAIConversationCreate, + project_id: int, + organization_id: int, +) -> OpenAIConversation: + """ + Create a new conversation or update existing one if response_id already exists. 
+ """ + existing_conversation = get_conversation_by_response_id( + session, conversation.response_id, project_id + ) + + if existing_conversation: + # Update existing conversation + update_data = conversation.model_dump(exclude_unset=True) + for field, value in update_data.items(): + setattr(existing_conversation, field, value) + + existing_conversation.updated_at = now() + session.add(existing_conversation) + session.commit() + session.refresh(existing_conversation) + + logger.info( + f"Updated existing conversation with response_id={conversation.response_id}, " + f"project_id={project_id}" + ) + + return existing_conversation + else: + # Create new conversation + return create_conversation(session, conversation, project_id, organization_id) diff --git a/backend/app/models/__init__.py b/backend/app/models/__init__.py index 2c4c87e0..0c8d7bff 100644 --- a/backend/app/models/__init__.py +++ b/backend/app/models/__init__.py @@ -55,3 +55,10 @@ from .threads import OpenAI_Thread, OpenAIThreadBase, OpenAIThreadCreate from .assistants import Assistant, AssistantBase, AssistantCreate, AssistantUpdate + +from .openai_conversation import ( + OpenAIConversation, + OpenAIConversationBase, + OpenAIConversationCreate, + OpenAIConversationUpdate, +) diff --git a/backend/app/models/openai_conversation.py b/backend/app/models/openai_conversation.py new file mode 100644 index 00000000..5d82f11a --- /dev/null +++ b/backend/app/models/openai_conversation.py @@ -0,0 +1,78 @@ +from datetime import datetime +from typing import Optional + +from sqlmodel import Field, Relationship, SQLModel + +from app.core.util import now + + +class OpenAIConversationBase(SQLModel): + response_id: str = Field(index=True, description="OpenAI response ID") + ancestor_response_id: Optional[str] = Field( + default=None, + index=True, + description="Ancestor response ID for conversation threading", + ) + previous_response_id: Optional[str] = Field( + default=None, index=True, description="Previous response ID 
in the conversation" + ) + user_question: str = Field(description="User's question/input") + response: Optional[str] = Field(default=None, description="AI response") + model: str = Field(description="Model used for the response") + assistant_id: str = Field(description="Assistant ID used for the response") + project_id: int = Field( + foreign_key="project.id", nullable=False, ondelete="CASCADE" + ) + organization_id: int = Field( + foreign_key="organization.id", nullable=False, ondelete="CASCADE" + ) + + +class OpenAIConversation(OpenAIConversationBase, table=True): + __tablename__ = "openai_conversation" + + id: int = Field(default=None, primary_key=True) + inserted_at: datetime = Field(default_factory=now, nullable=False) + updated_at: datetime = Field(default_factory=now, nullable=False) + is_deleted: bool = Field(default=False, nullable=False) + deleted_at: Optional[datetime] = Field(default=None, nullable=True) + + # Relationships + project: "Project" = Relationship(back_populates="openai_conversations") + organization: "Organization" = Relationship(back_populates="openai_conversations") + + +class OpenAIConversationCreate(SQLModel): + response_id: str = Field(description="OpenAI response ID") + ancestor_response_id: Optional[str] = Field( + default=None, description="Ancestor response ID for conversation threading" + ) + previous_response_id: Optional[str] = Field( + default=None, description="Previous response ID in the conversation" + ) + user_question: str = Field(description="User's question/input", min_length=1) + response: Optional[str] = Field(default=None, description="AI response") + model: str = Field(description="Model used for the response", min_length=1) + assistant_id: str = Field( + description="Assistant ID used for the response", min_length=1 + ) + + +class OpenAIConversationUpdate(SQLModel): + response_id: Optional[str] = Field(default=None, description="OpenAI response ID") + ancestor_response_id: Optional[str] = Field( + default=None, 
description="Ancestor response ID for conversation threading" + ) + previous_response_id: Optional[str] = Field( + default=None, description="Previous response ID in the conversation" + ) + user_question: Optional[str] = Field( + default=None, description="User's question/input", min_length=1 + ) + response: Optional[str] = Field(default=None, description="AI response") + model: Optional[str] = Field( + default=None, description="Model used for the response", min_length=1 + ) + assistant_id: Optional[str] = Field( + default=None, description="Assistant ID used for the response", min_length=1 + ) diff --git a/backend/app/models/organization.py b/backend/app/models/organization.py index 90eed18b..e854b11e 100644 --- a/backend/app/models/organization.py +++ b/backend/app/models/organization.py @@ -11,6 +11,7 @@ from .api_key import APIKey from .assistants import Assistant from .collection import Collection + from .openai_conversation import OpenAIConversation # Shared properties for an Organization @@ -52,6 +53,9 @@ class Organization(OrganizationBase, table=True): collections: list["Collection"] = Relationship( back_populates="organization", cascade_delete=True ) + openai_conversations: list["OpenAIConversation"] = Relationship( + back_populates="organization", cascade_delete=True + ) # Properties to return via API diff --git a/backend/app/models/project.py b/backend/app/models/project.py index de2ceb3c..442b740a 100644 --- a/backend/app/models/project.py +++ b/backend/app/models/project.py @@ -49,6 +49,9 @@ class Project(ProjectBase, table=True): collections: list["Collection"] = Relationship( back_populates="project", cascade_delete=True ) + openai_conversations: list["OpenAIConversation"] = Relationship( + back_populates="project", cascade_delete=True + ) # Properties to return via API diff --git a/backend/app/tests/api/routes/test_openai_conversation.py b/backend/app/tests/api/routes/test_openai_conversation.py new file mode 100644 index 00000000..42f6ae4f --- 
/dev/null +++ b/backend/app/tests/api/routes/test_openai_conversation.py @@ -0,0 +1,403 @@ +import pytest +from uuid import uuid4 +from sqlmodel import Session +from fastapi import HTTPException +from fastapi.testclient import TestClient + +from app.tests.utils.conversation import get_conversation + + +@pytest.fixture +def conversation_create_payload(): + return { + "response_id": f"resp_{uuid4()}", + "ancestor_response_id": None, + "previous_response_id": None, + "user_question": "What is the capital of France?", + "response": "The capital of France is Paris.", + "model": "gpt-4o", + "assistant_id": f"asst_{uuid4()}", + } + + +@pytest.fixture +def conversation_update_payload(): + return { + "response": "The capital of France is Paris, which is a beautiful city.", + "model": "gpt-4o-mini", + } + + +def test_create_conversation_success( + client: TestClient, + conversation_create_payload: dict, + user_api_key_header: dict, +): + """Test successful conversation creation.""" + response = client.post( + "/api/v1/openai-conversation", + json=conversation_create_payload, + headers=user_api_key_header, + ) + + assert response.status_code == 201 + response_data = response.json() + assert response_data["success"] is True + assert ( + response_data["data"]["response_id"] + == conversation_create_payload["response_id"] + ) + assert ( + response_data["data"]["user_question"] + == conversation_create_payload["user_question"] + ) + assert response_data["data"]["response"] == conversation_create_payload["response"] + assert response_data["data"]["model"] == conversation_create_payload["model"] + assert ( + response_data["data"]["assistant_id"] + == conversation_create_payload["assistant_id"] + ) + + +def test_create_conversation_invalid_data( + client: TestClient, + user_api_key_header: dict, +): + """Test conversation creation with invalid data.""" + invalid_payload = { + "response_id": "", # Empty response_id + "user_question": "", # Empty user_question + "model": "", # Empty 
model + "assistant_id": "", # Empty assistant_id + } + + response = client.post( + "/api/v1/openai-conversation", + json=invalid_payload, + headers=user_api_key_header, + ) + + assert response.status_code == 422 + + +def test_upsert_conversation_success( + client: TestClient, + conversation_create_payload: dict, + user_api_key_header: dict, +): + """Test successful conversation upsert.""" + response = client.post( + "/api/v1/openai-conversation/upsert", + json=conversation_create_payload, + headers=user_api_key_header, + ) + + assert response.status_code == 201 + response_data = response.json() + assert response_data["success"] is True + assert ( + response_data["data"]["response_id"] + == conversation_create_payload["response_id"] + ) + + +def test_upsert_conversation_update_existing( + client: TestClient, + conversation_create_payload: dict, + user_api_key_header: dict, +): + """Test upsert conversation updates existing conversation.""" + # First create a conversation + response1 = client.post( + "/api/v1/openai-conversation/upsert", + json=conversation_create_payload, + headers=user_api_key_header, + ) + assert response1.status_code == 201 + + # Update the payload and upsert again + conversation_create_payload["response"] = "Updated response" + response2 = client.post( + "/api/v1/openai-conversation/upsert", + json=conversation_create_payload, + headers=user_api_key_header, + ) + + assert response2.status_code == 201 + response_data = response2.json() + assert response_data["success"] is True + assert response_data["data"]["response"] == "Updated response" + + +def test_update_conversation_success( + client: TestClient, + db: Session, + conversation_update_payload: dict, + user_api_key_header: dict, +): + """Test successful conversation update.""" + # Get the project ID from the user's API key + from app.tests.utils.utils import get_user_from_api_key + + api_key = get_user_from_api_key(db, user_api_key_header) + + # Create a conversation in the same project as 
the API key + conversation = get_conversation(db, project_id=api_key.project_id) + conversation_id = conversation.id + + response = client.patch( + f"/api/v1/openai-conversation/{conversation_id}", + json=conversation_update_payload, + headers=user_api_key_header, + ) + + assert response.status_code == 200 + response_data = response.json() + assert response_data["success"] is True + assert response_data["data"]["response"] == conversation_update_payload["response"] + assert response_data["data"]["model"] == conversation_update_payload["model"] + + +def test_update_conversation_not_found( + client: TestClient, + conversation_update_payload: dict, + user_api_key_header: dict, +): + """Test conversation update with non-existent ID.""" + response = client.patch( + "/api/v1/openai-conversation/99999", + json=conversation_update_payload, + headers=user_api_key_header, + ) + + assert response.status_code == 404 + response_data = response.json() + assert "not found" in response_data["error"] + + +def test_get_conversation_success( + client: TestClient, + db: Session, + user_api_key_header: dict, +): + """Test successful conversation retrieval by ID.""" + # Get the project ID from the user's API key + from app.tests.utils.utils import get_user_from_api_key + + api_key = get_user_from_api_key(db, user_api_key_header) + + # Create a conversation in the same project as the API key + conversation = get_conversation(db, project_id=api_key.project_id) + conversation_id = conversation.id + + response = client.get( + f"/api/v1/openai-conversation/{conversation_id}", + headers=user_api_key_header, + ) + + assert response.status_code == 200 + response_data = response.json() + assert response_data["success"] is True + assert response_data["data"]["id"] == conversation_id + assert response_data["data"]["response_id"] == conversation.response_id + + +def test_get_conversation_not_found( + client: TestClient, + user_api_key_header: dict, +): + """Test conversation retrieval with 
non-existent ID.""" + response = client.get( + "/api/v1/openai-conversation/99999", + headers=user_api_key_header, + ) + + assert response.status_code == 404 + response_data = response.json() + assert "not found" in response_data["error"] + + +def test_get_conversation_by_response_id_success( + client: TestClient, + db: Session, + user_api_key_header: dict, +): + """Test successful conversation retrieval by response ID.""" + # Get the project ID from the user's API key + from app.tests.utils.utils import get_user_from_api_key + + api_key = get_user_from_api_key(db, user_api_key_header) + + # Create a conversation in the same project as the API key + conversation = get_conversation(db, project_id=api_key.project_id) + response_id = conversation.response_id + + response = client.get( + f"/api/v1/openai-conversation/response/{response_id}", + headers=user_api_key_header, + ) + + assert response.status_code == 200 + response_data = response.json() + assert response_data["success"] is True + assert response_data["data"]["response_id"] == response_id + + +def test_get_conversation_by_response_id_not_found( + client: TestClient, + user_api_key_header: dict, +): + """Test conversation retrieval with non-existent response ID.""" + response = client.get( + "/api/v1/openai-conversation/response/non_existent_response_id", + headers=user_api_key_header, + ) + + assert response.status_code == 404 + response_data = response.json() + assert "not found" in response_data["error"] + + +def test_get_conversation_thread_success( + client: TestClient, + db: Session, + user_api_key_header: dict, +): + """Test successful conversation thread retrieval.""" + conversation = get_conversation(db) + response_id = conversation.response_id + + response = client.get( + f"/api/v1/openai-conversation/thread/{response_id}", + headers=user_api_key_header, + ) + + assert response.status_code == 200 + response_data = response.json() + assert response_data["success"] is True + assert 
isinstance(response_data["data"], list) + + +def test_list_conversations_success( + client: TestClient, + db: Session, + user_api_key_header: dict, +): + """Test successful conversation listing.""" + # Get the project ID from the user's API key + from app.tests.utils.utils import get_user_from_api_key + + api_key = get_user_from_api_key(db, user_api_key_header) + + # Create a conversation in the same project as the API key + get_conversation(db, project_id=api_key.project_id) + + response = client.get( + "/api/v1/openai-conversation", + headers=user_api_key_header, + ) + + assert response.status_code == 200 + response_data = response.json() + assert response_data["success"] is True + assert isinstance(response_data["data"], list) + assert len(response_data["data"]) > 0 + + +def test_list_conversations_with_pagination( + client: TestClient, + db: Session, + user_api_key_header: dict, +): + """Test conversation listing with pagination.""" + # Create multiple conversations + for _ in range(3): + get_conversation(db) + + response = client.get( + "/api/v1/openai-conversation?skip=1&limit=2", + headers=user_api_key_header, + ) + + assert response.status_code == 200 + response_data = response.json() + assert response_data["success"] is True + assert isinstance(response_data["data"], list) + assert len(response_data["data"]) <= 2 + + +def test_list_conversations_invalid_pagination( + client: TestClient, + user_api_key_header: dict, +): + """Test conversation listing with invalid pagination parameters.""" + response = client.get( + "/api/v1/openai-conversation?skip=-1&limit=0", + headers=user_api_key_header, + ) + + assert response.status_code == 422 + + +def test_list_conversations_by_assistant_success( + client: TestClient, + db: Session, + user_api_key_header: dict, +): + """Test successful conversation listing by assistant.""" + conversation = get_conversation(db) + assistant_id = conversation.assistant_id + + response = client.get( + 
f"/api/v1/openai-conversation/assistant/{assistant_id}", + headers=user_api_key_header, + ) + + assert response.status_code == 200 + response_data = response.json() + assert response_data["success"] is True + assert isinstance(response_data["data"], list) + # All returned conversations should have the same assistant_id + for conv in response_data["data"]: + assert conv["assistant_id"] == assistant_id + + +def test_delete_conversation_success( + client: TestClient, + db: Session, + user_api_key_header: dict, +): + """Test successful conversation deletion.""" + # Get the project ID from the user's API key + from app.tests.utils.utils import get_user_from_api_key + + api_key = get_user_from_api_key(db, user_api_key_header) + + # Create a conversation in the same project as the API key + conversation = get_conversation(db, project_id=api_key.project_id) + conversation_id = conversation.id + + response = client.delete( + f"/api/v1/openai-conversation/{conversation_id}", + headers=user_api_key_header, + ) + + assert response.status_code == 200 + response_data = response.json() + assert response_data["success"] is True + assert "deleted successfully" in response_data["data"]["message"] + + +def test_delete_conversation_not_found( + client: TestClient, + user_api_key_header: dict, +): + """Test conversation deletion with non-existent ID.""" + response = client.delete( + "/api/v1/openai-conversation/99999", + headers=user_api_key_header, + ) + + assert response.status_code == 404 + response_data = response.json() + assert "not found" in response_data["error"] diff --git a/backend/app/tests/crud/test_openai_conversation.py b/backend/app/tests/crud/test_openai_conversation.py new file mode 100644 index 00000000..3e2c2554 --- /dev/null +++ b/backend/app/tests/crud/test_openai_conversation.py @@ -0,0 +1,406 @@ +import pytest +from uuid import uuid4 +from sqlmodel import Session + +from app.crud.openai_conversation import ( + get_conversation_by_id, + 
get_conversation_by_response_id, + get_conversations_by_project, + get_conversations_by_assistant, + get_conversation_thread, + create_conversation, + update_conversation, + delete_conversation, + upsert_conversation, +) +from app.models import OpenAIConversationCreate, OpenAIConversationUpdate +from app.tests.utils.conversation import get_conversation +from app.tests.utils.utils import get_project, get_organization + + +@pytest.fixture +def conversation_create_data(): + return OpenAIConversationCreate( + response_id=f"resp_{uuid4()}", + ancestor_response_id=None, + previous_response_id=None, + user_question="What is the capital of France?", + response="The capital of France is Paris.", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + +@pytest.fixture +def conversation_update_data(): + return OpenAIConversationUpdate( + response="The capital of France is Paris, which is a beautiful city.", + model="gpt-4o-mini", + ) + + +def test_create_conversation_success( + db: Session, conversation_create_data: OpenAIConversationCreate +): + """Test successful conversation creation.""" + project = get_project(db) + organization = get_organization(db) + + conversation = create_conversation( + session=db, + conversation=conversation_create_data, + project_id=project.id, + organization_id=organization.id, + ) + + assert conversation is not None + assert conversation.response_id == conversation_create_data.response_id + assert conversation.user_question == conversation_create_data.user_question + assert conversation.response == conversation_create_data.response + assert conversation.model == conversation_create_data.model + assert conversation.assistant_id == conversation_create_data.assistant_id + assert conversation.project_id == project.id + assert conversation.organization_id == organization.id + assert conversation.is_deleted is False + assert conversation.deleted_at is None + + +def test_get_conversation_by_id_success(db: Session): + """Test successful conversation 
retrieval by ID.""" + conversation = get_conversation(db) + project = get_project(db) + + retrieved_conversation = get_conversation_by_id( + session=db, + conversation_id=conversation.id, + project_id=project.id, + ) + + assert retrieved_conversation is not None + assert retrieved_conversation.id == conversation.id + assert retrieved_conversation.response_id == conversation.response_id + + +def test_get_conversation_by_id_not_found(db: Session): + """Test conversation retrieval by non-existent ID.""" + project = get_project(db) + + retrieved_conversation = get_conversation_by_id( + session=db, + conversation_id=99999, + project_id=project.id, + ) + + assert retrieved_conversation is None + + +def test_get_conversation_by_response_id_success(db: Session): + """Test successful conversation retrieval by response ID.""" + conversation = get_conversation(db) + project = get_project(db) + + retrieved_conversation = get_conversation_by_response_id( + session=db, + response_id=conversation.response_id, + project_id=project.id, + ) + + assert retrieved_conversation is not None + assert retrieved_conversation.response_id == conversation.response_id + assert retrieved_conversation.id == conversation.id + + +def test_get_conversation_by_response_id_not_found(db: Session): + """Test conversation retrieval by non-existent response ID.""" + project = get_project(db) + + retrieved_conversation = get_conversation_by_response_id( + session=db, + response_id="non_existent_response_id", + project_id=project.id, + ) + + assert retrieved_conversation is None + + +def test_get_conversations_by_project_success(db: Session): + """Test successful conversation listing by project.""" + project = get_project(db) + organization = get_organization(db) + + # Create multiple conversations directly + from app.models import OpenAIConversationCreate + from app.crud.openai_conversation import create_conversation + from uuid import uuid4 + + for i in range(3): + conversation_data = 
OpenAIConversationCreate( + response_id=f"resp_{uuid4()}", + ancestor_response_id=None, + previous_response_id=None, + user_question=f"Test question {i}", + response=f"Test response {i}", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + create_conversation( + session=db, + conversation=conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + + conversations = get_conversations_by_project( + session=db, + project_id=project.id, + skip=0, + limit=10, + ) + + assert len(conversations) >= 3 + for conversation in conversations: + assert conversation.project_id == project.id + assert conversation.is_deleted is False + + +def test_get_conversations_by_project_with_pagination(db: Session): + """Test conversation listing by project with pagination.""" + # Create multiple conversations + for _ in range(5): + get_conversation(db) + + project = get_project(db) + + conversations = get_conversations_by_project( + session=db, + project_id=project.id, + skip=2, + limit=2, + ) + + assert len(conversations) <= 2 + + +def test_get_conversations_by_assistant_success(db: Session): + """Test successful conversation listing by assistant.""" + conversation = get_conversation(db) + project = get_project(db) + + conversations = get_conversations_by_assistant( + session=db, + assistant_id=conversation.assistant_id, + project_id=project.id, + skip=0, + limit=10, + ) + + assert len(conversations) >= 1 + for conv in conversations: + assert conv.assistant_id == conversation.assistant_id + assert conv.project_id == project.id + assert conv.is_deleted is False + + +def test_get_conversations_by_assistant_not_found(db: Session): + """Test conversation listing by non-existent assistant.""" + project = get_project(db) + + conversations = get_conversations_by_assistant( + session=db, + assistant_id="non_existent_assistant_id", + project_id=project.id, + skip=0, + limit=10, + ) + + assert len(conversations) == 0 + + +def test_get_conversation_thread_success(db: 
Session): + """Test successful conversation thread retrieval.""" + conversation = get_conversation(db) + project = get_project(db) + + thread_conversations = get_conversation_thread( + session=db, + response_id=conversation.response_id, + project_id=project.id, + ) + + assert isinstance(thread_conversations, list) + assert len(thread_conversations) >= 1 + assert thread_conversations[0].response_id == conversation.response_id + + +def test_get_conversation_thread_not_found(db: Session): + """Test conversation thread retrieval with non-existent response ID.""" + project = get_project(db) + + thread_conversations = get_conversation_thread( + session=db, + response_id="non_existent_response_id", + project_id=project.id, + ) + + assert isinstance(thread_conversations, list) + assert len(thread_conversations) == 0 + + +def test_update_conversation_success( + db: Session, conversation_update_data: OpenAIConversationUpdate +): + """Test successful conversation update.""" + conversation = get_conversation(db) + project = get_project(db) + + updated_conversation = update_conversation( + session=db, + conversation_id=conversation.id, + project_id=project.id, + conversation_update=conversation_update_data, + ) + + assert updated_conversation is not None + assert updated_conversation.response == conversation_update_data.response + assert updated_conversation.model == conversation_update_data.model + assert updated_conversation.id == conversation.id + + +def test_update_conversation_not_found( + db: Session, conversation_update_data: OpenAIConversationUpdate +): + """Test conversation update with non-existent ID.""" + project = get_project(db) + + updated_conversation = update_conversation( + session=db, + conversation_id=99999, + project_id=project.id, + conversation_update=conversation_update_data, + ) + + assert updated_conversation is None + + +def test_delete_conversation_success(db: Session): + """Test successful conversation deletion.""" + conversation = 
get_conversation(db) + project = get_project(db) + + deleted_conversation = delete_conversation( + session=db, + conversation_id=conversation.id, + project_id=project.id, + ) + + assert deleted_conversation is not None + assert deleted_conversation.is_deleted is True + assert deleted_conversation.deleted_at is not None + assert deleted_conversation.id == conversation.id + + +def test_delete_conversation_not_found(db: Session): + """Test conversation deletion with non-existent ID.""" + project = get_project(db) + + deleted_conversation = delete_conversation( + session=db, + conversation_id=99999, + project_id=project.id, + ) + + assert deleted_conversation is None + + +def test_upsert_conversation_create_new( + db: Session, conversation_create_data: OpenAIConversationCreate +): + """Test upsert conversation creates new conversation.""" + project = get_project(db) + organization = get_organization(db) + + conversation = upsert_conversation( + session=db, + conversation=conversation_create_data, + project_id=project.id, + organization_id=organization.id, + ) + + assert conversation is not None + assert conversation.response_id == conversation_create_data.response_id + assert conversation.user_question == conversation_create_data.user_question + + +def test_upsert_conversation_update_existing( + db: Session, conversation_create_data: OpenAIConversationCreate +): + """Test upsert conversation updates existing conversation.""" + project = get_project(db) + organization = get_organization(db) + + # First create a conversation + conversation1 = upsert_conversation( + session=db, + conversation=conversation_create_data, + project_id=project.id, + organization_id=organization.id, + ) + + # Update the data and upsert again + conversation_create_data.response = "Updated response" + conversation_create_data.model = "gpt-4o-mini" + + conversation2 = upsert_conversation( + session=db, + conversation=conversation_create_data, + project_id=project.id, + 
organization_id=organization.id, + ) + + assert conversation2 is not None + assert conversation2.id == conversation1.id # Same conversation + assert conversation2.response == "Updated response" + assert conversation2.model == "gpt-4o-mini" + assert conversation2.response_id == conversation1.response_id + + +def test_conversation_soft_delete_behavior(db: Session): + """Test that soft deleted conversations are not returned by queries.""" + conversation = get_conversation(db) + project = get_project(db) + + # Delete the conversation + delete_conversation( + session=db, + conversation_id=conversation.id, + project_id=project.id, + ) + + # Try to retrieve it by ID + retrieved_conversation = get_conversation_by_id( + session=db, + conversation_id=conversation.id, + project_id=project.id, + ) + + assert retrieved_conversation is None + + # Try to retrieve it by response ID + retrieved_conversation = get_conversation_by_response_id( + session=db, + response_id=conversation.response_id, + project_id=project.id, + ) + + assert retrieved_conversation is None + + # Check that it's not in the project list + conversations = get_conversations_by_project( + session=db, + project_id=project.id, + skip=0, + limit=10, + ) + + conversation_ids = [conv.id for conv in conversations] + assert conversation.id not in conversation_ids diff --git a/backend/app/tests/utils/conversation.py b/backend/app/tests/utils/conversation.py new file mode 100644 index 00000000..e8363ccd --- /dev/null +++ b/backend/app/tests/utils/conversation.py @@ -0,0 +1,80 @@ +from uuid import uuid4 +from sqlmodel import Session, select + +from app.models import OpenAIConversation, OpenAIConversationCreate +from app.crud.openai_conversation import create_conversation + + +def get_conversation( + session: Session, response_id: str | None = None, project_id: int | None = None +) -> OpenAIConversation: + """ + Retrieve an active conversation from the database. 
+ + If a response_id is provided, fetch the active conversation with that response_id. + If a project_id is provided, fetch a conversation from that specific project. + If no response_id or project_id is provided, fetch any random conversation. + """ + if response_id: + statement = ( + select(OpenAIConversation) + .where( + OpenAIConversation.response_id == response_id, + OpenAIConversation.is_deleted == False, + ) + .limit(1) + ) + elif project_id: + statement = ( + select(OpenAIConversation) + .where( + OpenAIConversation.project_id == project_id, + OpenAIConversation.is_deleted == False, + ) + .limit(1) + ) + else: + statement = ( + select(OpenAIConversation) + .where(OpenAIConversation.is_deleted == False) + .limit(1) + ) + + conversation = session.exec(statement).first() + + if not conversation: + # Create a new conversation if none exists + from app.tests.utils.utils import get_project, get_organization + + if project_id: + # Get the specific project + from app.models import Project + + project = session.exec( + select(Project).where(Project.id == project_id) + ).first() + if not project: + raise ValueError(f"Project with ID {project_id} not found") + else: + project = get_project(session) + + organization = get_organization(session) + + conversation_data = OpenAIConversationCreate( + response_id=f"resp_{uuid4()}", + ancestor_response_id=None, + previous_response_id=None, + user_question="Test question", + response="Test response", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + conversation = create_conversation( + session=session, + conversation=conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + + return conversation diff --git a/backend/app/tests/utils/utils.py b/backend/app/tests/utils/utils.py index ae4a7bee..9fb5311f 100644 --- a/backend/app/tests/utils/utils.py +++ b/backend/app/tests/utils/utils.py @@ -12,7 +12,7 @@ from app.core.config import settings from app.crud.user import get_user_by_email from 
app.crud.api_key import get_api_key_by_value, get_api_key_by_user_id -from app.models import APIKeyPublic, Project, Assistant +from app.models import APIKeyPublic, Project, Assistant, Organization T = TypeVar("T") @@ -113,6 +113,30 @@ def get_assistant(session: Session, name: str | None = None) -> Assistant: return assistant +def get_organization(session: Session, name: str | None = None) -> Organization: + """ + Retrieve an active organization from the database. + + If an organization name is provided, fetch the active organization with that name. + If no name is provided, fetch any random organization. + """ + if name: + statement = ( + select(Organization) + .where(Organization.name == name, Organization.is_active) + .limit(1) + ) + else: + statement = select(Organization).where(Organization.is_active).limit(1) + + organization = session.exec(statement).first() + + if not organization: + raise ValueError("No active organizations found") + + return organization + + class SequentialUuidGenerator: def __init__(self, start=0): self.start = start From a2b001cbb87f2282abf4c3a69dc4d3172ae0186b Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Sat, 26 Jul 2025 12:35:57 +0530 Subject: [PATCH 03/29] added ancestor id fetching --- backend/app/api/routes/openai_conversation.py | 102 ++----- backend/app/crud/__init__.py | 5 +- backend/app/crud/openai_conversation.py | 185 +++---------- backend/app/models/__init__.py | 1 - backend/app/models/openai_conversation.py | 20 -- .../api/routes/test_openai_conversation.py | 183 ++++--------- .../tests/crud/test_openai_conversation.py | 250 +++++------------- 7 files changed, 160 insertions(+), 586 deletions(-) diff --git a/backend/app/api/routes/openai_conversation.py b/backend/app/api/routes/openai_conversation.py index ee553d79..10491999 100644 --- a/backend/app/api/routes/openai_conversation.py +++ b/backend/app/api/routes/openai_conversation.py @@ -7,18 +7,14 @@ from app.crud import ( get_conversation_by_id, 
get_conversation_by_response_id, + get_conversation_by_ancestor_id, get_conversations_by_project, - get_conversations_by_assistant, - get_conversation_thread, create_conversation, - update_conversation, delete_conversation, - upsert_conversation, ) from app.models import ( UserProjectOrg, OpenAIConversationCreate, - OpenAIConversationUpdate, OpenAIConversation, ) from app.utils import APIResponse @@ -44,49 +40,6 @@ def create_conversation_route( return APIResponse.success_response(conversation) -@router.post("/upsert", response_model=APIResponse[OpenAIConversation], status_code=201) -def upsert_conversation_route( - conversation_in: OpenAIConversationCreate, - session: Session = Depends(get_db), - current_user: UserProjectOrg = Depends(get_current_user_org_project), -): - """ - Create a new conversation or update existing one if response_id already exists. - """ - conversation = upsert_conversation( - session=session, - conversation=conversation_in, - project_id=current_user.project_id, - organization_id=current_user.organization_id, - ) - return APIResponse.success_response(conversation) - - -@router.patch("/{conversation_id}", response_model=APIResponse[OpenAIConversation]) -def update_conversation_route( - conversation_id: Annotated[int, Path(description="Conversation ID to update")], - conversation_update: OpenAIConversationUpdate, - session: Session = Depends(get_db), - current_user: UserProjectOrg = Depends(get_current_user_org_project), -): - """ - Update an existing conversation with provided fields. - """ - updated_conversation = update_conversation( - session=session, - conversation_id=conversation_id, - project_id=current_user.project_id, - conversation_update=conversation_update, - ) - - if not updated_conversation: - raise HTTPException( - status_code=404, detail=f"Conversation with ID {conversation_id} not found." 
- ) - - return APIResponse.success_response(updated_conversation) - - @router.get( "/{conversation_id}", response_model=APIResponse[OpenAIConversation], @@ -135,27 +88,29 @@ def get_conversation_by_response_id_route( @router.get( - "/thread/{response_id}", - response_model=APIResponse[list[OpenAIConversation]], - summary="Get the full conversation thread starting from a response ID", + "/ancestor/{ancestor_response_id}", + response_model=APIResponse[OpenAIConversation], + summary="Get a conversation by its ancestor response ID", ) -def get_conversation_thread_route( - response_id: str = Path( - ..., description="The response ID to start the thread from" +def get_conversation_by_ancestor_id_route( + ancestor_response_id: str = Path( + ..., description="The ancestor response ID to fetch" ), session: Session = Depends(get_db), current_user: UserProjectOrg = Depends(get_current_user_org_project), ): """ - Get the full conversation thread starting from a given response ID. - This includes all ancestor and previous responses in the conversation chain. + Fetch a conversation by its ancestor response ID. 
""" - thread_conversations = get_conversation_thread( - session=session, - response_id=response_id, - project_id=current_user.project_id, + conversation = get_conversation_by_ancestor_id( + session, ancestor_response_id, current_user.project_id ) - return APIResponse.success_response(thread_conversations) + if not conversation: + raise HTTPException( + status_code=404, + detail=f"Conversation with ancestor response ID {ancestor_response_id} not found.", + ) + return APIResponse.success_response(conversation) @router.get( @@ -178,31 +133,6 @@ def list_conversations_route( return APIResponse.success_response(conversations) -@router.get( - "/assistant/{assistant_id}", - response_model=APIResponse[list[OpenAIConversation]], - summary="List all conversations for a specific assistant", -) -def list_conversations_by_assistant_route( - assistant_id: str = Path(..., description="The assistant ID to filter by"), - session: Session = Depends(get_db), - current_user: UserProjectOrg = Depends(get_current_user_org_project), - skip: int = Query(0, ge=0, description="How many items to skip"), - limit: int = Query(100, ge=1, le=100, description="Maximum items to return"), -): - """ - List all conversations for a specific assistant in the current project. 
- """ - conversations = get_conversations_by_assistant( - session=session, - assistant_id=assistant_id, - project_id=current_user.project_id, - skip=skip, - limit=limit, - ) - return APIResponse.success_response(conversations) - - @router.delete("/{conversation_id}", response_model=APIResponse) def delete_conversation_route( conversation_id: Annotated[int, Path(description="Conversation ID to delete")], diff --git a/backend/app/crud/__init__.py b/backend/app/crud/__init__.py index a2cd5f3c..fe5855e9 100644 --- a/backend/app/crud/__init__.py +++ b/backend/app/crud/__init__.py @@ -58,11 +58,8 @@ from .openai_conversation import ( get_conversation_by_id, get_conversation_by_response_id, + get_conversation_by_ancestor_id, get_conversations_by_project, - get_conversations_by_assistant, - get_conversation_thread, create_conversation, - update_conversation, delete_conversation, - upsert_conversation, ) diff --git a/backend/app/crud/openai_conversation.py b/backend/app/crud/openai_conversation.py index e59ef057..ad072132 100644 --- a/backend/app/crud/openai_conversation.py +++ b/backend/app/crud/openai_conversation.py @@ -1,13 +1,7 @@ import logging -from typing import Optional, List - -from sqlmodel import Session, and_, select - -from app.models import ( - OpenAIConversation, - OpenAIConversationCreate, - OpenAIConversationUpdate, -) +from typing import List, Optional +from sqlmodel import Session, select +from app.models import OpenAIConversation, OpenAIConversationCreate from app.core.util import now logger = logging.getLogger(__name__) @@ -16,68 +10,60 @@ def get_conversation_by_id( session: Session, conversation_id: int, project_id: int ) -> Optional[OpenAIConversation]: - """Get a conversation by its ID and project ID.""" + """ + Return a conversation by its ID and project. 
+ """ statement = select(OpenAIConversation).where( - and_( - OpenAIConversation.id == conversation_id, - OpenAIConversation.project_id == project_id, - OpenAIConversation.is_deleted == False, - ) + OpenAIConversation.id == conversation_id, + OpenAIConversation.project_id == project_id, + OpenAIConversation.is_deleted == False, ) - return session.exec(statement).first() + result = session.exec(statement).first() + return result def get_conversation_by_response_id( session: Session, response_id: str, project_id: int ) -> Optional[OpenAIConversation]: - """Get a conversation by its OpenAI response ID and project ID.""" + """ + Return a conversation by its OpenAI response ID and project. + """ statement = select(OpenAIConversation).where( - and_( - OpenAIConversation.response_id == response_id, - OpenAIConversation.project_id == project_id, - OpenAIConversation.is_deleted == False, - ) + OpenAIConversation.response_id == response_id, + OpenAIConversation.project_id == project_id, + OpenAIConversation.is_deleted == False, ) - return session.exec(statement).first() + result = session.exec(statement).first() + return result -def get_conversations_by_project( - session: Session, - project_id: int, - skip: int = 0, - limit: int = 100, -) -> List[OpenAIConversation]: +def get_conversation_by_ancestor_id( + session: Session, ancestor_response_id: str, project_id: int +) -> Optional[OpenAIConversation]: """ - Return all conversations for a given project, with optional pagination. + Return a conversation by its ancestor response ID and project. 
""" - statement = ( - select(OpenAIConversation) - .where( - OpenAIConversation.project_id == project_id, - OpenAIConversation.is_deleted == False, - ) - .order_by(OpenAIConversation.inserted_at.desc()) - .offset(skip) - .limit(limit) + statement = select(OpenAIConversation).where( + OpenAIConversation.ancestor_response_id == ancestor_response_id, + OpenAIConversation.project_id == project_id, + OpenAIConversation.is_deleted == False, ) - results = session.exec(statement).all() - return results + result = session.exec(statement).first() + return result -def get_conversations_by_assistant( +def get_conversations_by_project( session: Session, - assistant_id: str, project_id: int, skip: int = 0, limit: int = 100, ) -> List[OpenAIConversation]: """ - Return all conversations for a given assistant and project, with optional pagination. + Return all conversations for a given project, with optional pagination. """ statement = ( select(OpenAIConversation) .where( - OpenAIConversation.assistant_id == assistant_id, OpenAIConversation.project_id == project_id, OpenAIConversation.is_deleted == False, ) @@ -89,50 +75,6 @@ def get_conversations_by_assistant( return results -def get_conversation_thread( - session: Session, - response_id: str, - project_id: int, -) -> List[OpenAIConversation]: - """ - Get the full conversation thread starting from a given response ID. - This includes all ancestor and previous responses in the conversation chain. 
- """ - # First, find the root of the conversation thread - root_response_id = response_id - current_conversation = get_conversation_by_response_id( - session, response_id, project_id - ) - - if not current_conversation: - return [] - - # Find the root of the conversation thread - while current_conversation.ancestor_response_id: - root_conversation = get_conversation_by_response_id( - session, current_conversation.ancestor_response_id, project_id - ) - if not root_conversation: - break - root_response_id = current_conversation.ancestor_response_id - current_conversation = root_conversation - - # Now get all conversations in the thread - thread_conversations = [] - current_response_id = root_response_id - - while current_response_id: - conversation = get_conversation_by_response_id( - session, current_response_id, project_id - ) - if not conversation: - break - thread_conversations.append(conversation) - current_response_id = conversation.previous_response_id - - return thread_conversations - - def create_conversation( session: Session, conversation: OpenAIConversationCreate, @@ -159,36 +101,6 @@ def create_conversation( return db_conversation -def update_conversation( - session: Session, - conversation_id: int, - project_id: int, - conversation_update: OpenAIConversationUpdate, -) -> Optional[OpenAIConversation]: - """ - Update an existing conversation. 
- """ - db_conversation = get_conversation_by_id(session, conversation_id, project_id) - if not db_conversation: - return None - - update_data = conversation_update.model_dump(exclude_unset=True) - for field, value in update_data.items(): - setattr(db_conversation, field, value) - - db_conversation.updated_at = now() - session.add(db_conversation) - session.commit() - session.refresh(db_conversation) - - logger.info( - f"Updated conversation with id={conversation_id}, " - f"response_id={db_conversation.response_id}, project_id={project_id}" - ) - - return db_conversation - - def delete_conversation( session: Session, conversation_id: int, @@ -213,38 +125,3 @@ def delete_conversation( ) return db_conversation - - -def upsert_conversation( - session: Session, - conversation: OpenAIConversationCreate, - project_id: int, - organization_id: int, -) -> OpenAIConversation: - """ - Create a new conversation or update existing one if response_id already exists. - """ - existing_conversation = get_conversation_by_response_id( - session, conversation.response_id, project_id - ) - - if existing_conversation: - # Update existing conversation - update_data = conversation.model_dump(exclude_unset=True) - for field, value in update_data.items(): - setattr(existing_conversation, field, value) - - existing_conversation.updated_at = now() - session.add(existing_conversation) - session.commit() - session.refresh(existing_conversation) - - logger.info( - f"Updated existing conversation with response_id={conversation.response_id}, " - f"project_id={project_id}" - ) - - return existing_conversation - else: - # Create new conversation - return create_conversation(session, conversation, project_id, organization_id) diff --git a/backend/app/models/__init__.py b/backend/app/models/__init__.py index 0c8d7bff..d1222474 100644 --- a/backend/app/models/__init__.py +++ b/backend/app/models/__init__.py @@ -60,5 +60,4 @@ OpenAIConversation, OpenAIConversationBase, OpenAIConversationCreate, - 
OpenAIConversationUpdate, ) diff --git a/backend/app/models/openai_conversation.py b/backend/app/models/openai_conversation.py index 5d82f11a..b9e90a41 100644 --- a/backend/app/models/openai_conversation.py +++ b/backend/app/models/openai_conversation.py @@ -56,23 +56,3 @@ class OpenAIConversationCreate(SQLModel): assistant_id: str = Field( description="Assistant ID used for the response", min_length=1 ) - - -class OpenAIConversationUpdate(SQLModel): - response_id: Optional[str] = Field(default=None, description="OpenAI response ID") - ancestor_response_id: Optional[str] = Field( - default=None, description="Ancestor response ID for conversation threading" - ) - previous_response_id: Optional[str] = Field( - default=None, description="Previous response ID in the conversation" - ) - user_question: Optional[str] = Field( - default=None, description="User's question/input", min_length=1 - ) - response: Optional[str] = Field(default=None, description="AI response") - model: Optional[str] = Field( - default=None, description="Model used for the response", min_length=1 - ) - assistant_id: Optional[str] = Field( - default=None, description="Assistant ID used for the response", min_length=1 - ) diff --git a/backend/app/tests/api/routes/test_openai_conversation.py b/backend/app/tests/api/routes/test_openai_conversation.py index 42f6ae4f..fcd8d587 100644 --- a/backend/app/tests/api/routes/test_openai_conversation.py +++ b/backend/app/tests/api/routes/test_openai_conversation.py @@ -20,14 +20,6 @@ def conversation_create_payload(): } -@pytest.fixture -def conversation_update_payload(): - return { - "response": "The capital of France is Paris, which is a beautiful city.", - "model": "gpt-4o-mini", - } - - def test_create_conversation_success( client: TestClient, conversation_create_payload: dict, @@ -80,62 +72,12 @@ def test_create_conversation_invalid_data( assert response.status_code == 422 -def test_upsert_conversation_success( - client: TestClient, - 
conversation_create_payload: dict, - user_api_key_header: dict, -): - """Test successful conversation upsert.""" - response = client.post( - "/api/v1/openai-conversation/upsert", - json=conversation_create_payload, - headers=user_api_key_header, - ) - - assert response.status_code == 201 - response_data = response.json() - assert response_data["success"] is True - assert ( - response_data["data"]["response_id"] - == conversation_create_payload["response_id"] - ) - - -def test_upsert_conversation_update_existing( - client: TestClient, - conversation_create_payload: dict, - user_api_key_header: dict, -): - """Test upsert conversation updates existing conversation.""" - # First create a conversation - response1 = client.post( - "/api/v1/openai-conversation/upsert", - json=conversation_create_payload, - headers=user_api_key_header, - ) - assert response1.status_code == 201 - - # Update the payload and upsert again - conversation_create_payload["response"] = "Updated response" - response2 = client.post( - "/api/v1/openai-conversation/upsert", - json=conversation_create_payload, - headers=user_api_key_header, - ) - - assert response2.status_code == 201 - response_data = response2.json() - assert response_data["success"] is True - assert response_data["data"]["response"] == "Updated response" - - -def test_update_conversation_success( +def test_get_conversation_success( client: TestClient, db: Session, - conversation_update_payload: dict, user_api_key_header: dict, ): - """Test successful conversation update.""" + """Test successful conversation retrieval.""" # Get the project ID from the user's API key from app.tests.utils.utils import get_user_from_api_key @@ -145,28 +87,25 @@ def test_update_conversation_success( conversation = get_conversation(db, project_id=api_key.project_id) conversation_id = conversation.id - response = client.patch( + response = client.get( f"/api/v1/openai-conversation/{conversation_id}", - json=conversation_update_payload, 
headers=user_api_key_header, ) assert response.status_code == 200 response_data = response.json() assert response_data["success"] is True - assert response_data["data"]["response"] == conversation_update_payload["response"] - assert response_data["data"]["model"] == conversation_update_payload["model"] + assert response_data["data"]["id"] == conversation_id + assert response_data["data"]["response_id"] == conversation.response_id -def test_update_conversation_not_found( +def test_get_conversation_not_found( client: TestClient, - conversation_update_payload: dict, user_api_key_header: dict, ): - """Test conversation update with non-existent ID.""" - response = client.patch( + """Test conversation retrieval with non-existent ID.""" + response = client.get( "/api/v1/openai-conversation/99999", - json=conversation_update_payload, headers=user_api_key_header, ) @@ -175,12 +114,12 @@ def test_update_conversation_not_found( assert "not found" in response_data["error"] -def test_get_conversation_success( +def test_get_conversation_by_response_id_success( client: TestClient, db: Session, user_api_key_header: dict, ): - """Test successful conversation retrieval by ID.""" + """Test successful conversation retrieval by response ID.""" # Get the project ID from the user's API key from app.tests.utils.utils import get_user_from_api_key @@ -188,27 +127,27 @@ def test_get_conversation_success( # Create a conversation in the same project as the API key conversation = get_conversation(db, project_id=api_key.project_id) - conversation_id = conversation.id + response_id = conversation.response_id response = client.get( - f"/api/v1/openai-conversation/{conversation_id}", + f"/api/v1/openai-conversation/response/{response_id}", headers=user_api_key_header, ) assert response.status_code == 200 response_data = response.json() assert response_data["success"] is True - assert response_data["data"]["id"] == conversation_id - assert response_data["data"]["response_id"] == 
conversation.response_id + assert response_data["data"]["response_id"] == response_id + assert response_data["data"]["id"] == conversation.id -def test_get_conversation_not_found( +def test_get_conversation_by_response_id_not_found( client: TestClient, user_api_key_header: dict, ): - """Test conversation retrieval with non-existent ID.""" + """Test conversation retrieval with non-existent response ID.""" response = client.get( - "/api/v1/openai-conversation/99999", + "/api/v1/openai-conversation/response/nonexistent_response_id", headers=user_api_key_header, ) @@ -217,39 +156,57 @@ def test_get_conversation_not_found( assert "not found" in response_data["error"] -def test_get_conversation_by_response_id_success( +def test_get_conversation_by_ancestor_id_success( client: TestClient, db: Session, user_api_key_header: dict, ): - """Test successful conversation retrieval by response ID.""" + """Test successful conversation retrieval by ancestor ID.""" # Get the project ID from the user's API key from app.tests.utils.utils import get_user_from_api_key + from app.crud.openai_conversation import create_conversation + from app.models import OpenAIConversationCreate api_key = get_user_from_api_key(db, user_api_key_header) - # Create a conversation in the same project as the API key - conversation = get_conversation(db, project_id=api_key.project_id) - response_id = conversation.response_id + # Create a conversation with an ancestor in the same project as the API key + ancestor_response_id = f"resp_{uuid4()}" + conversation_data = OpenAIConversationCreate( + response_id=f"resp_{uuid4()}", + ancestor_response_id=ancestor_response_id, + previous_response_id=None, + user_question="What is the capital of France?", + response="The capital of France is Paris.", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + conversation = create_conversation( + session=db, + conversation=conversation_data, + project_id=api_key.project_id, + organization_id=api_key.organization_id, + ) 
response = client.get( - f"/api/v1/openai-conversation/response/{response_id}", + f"/api/v1/openai-conversation/ancestor/{ancestor_response_id}", headers=user_api_key_header, ) assert response.status_code == 200 response_data = response.json() assert response_data["success"] is True - assert response_data["data"]["response_id"] == response_id + assert response_data["data"]["ancestor_response_id"] == ancestor_response_id + assert response_data["data"]["id"] == conversation.id -def test_get_conversation_by_response_id_not_found( +def test_get_conversation_by_ancestor_id_not_found( client: TestClient, user_api_key_header: dict, ): - """Test conversation retrieval with non-existent response ID.""" + """Test conversation retrieval with non-existent ancestor ID.""" response = client.get( - "/api/v1/openai-conversation/response/non_existent_response_id", + "/api/v1/openai-conversation/ancestor/nonexistent_ancestor_id", headers=user_api_key_header, ) @@ -258,26 +215,6 @@ def test_get_conversation_by_response_id_not_found( assert "not found" in response_data["error"] -def test_get_conversation_thread_success( - client: TestClient, - db: Session, - user_api_key_header: dict, -): - """Test successful conversation thread retrieval.""" - conversation = get_conversation(db) - response_id = conversation.response_id - - response = client.get( - f"/api/v1/openai-conversation/thread/{response_id}", - headers=user_api_key_header, - ) - - assert response.status_code == 200 - response_data = response.json() - assert response_data["success"] is True - assert isinstance(response_data["data"], list) - - def test_list_conversations_success( client: TestClient, db: Session, @@ -339,29 +276,6 @@ def test_list_conversations_invalid_pagination( assert response.status_code == 422 -def test_list_conversations_by_assistant_success( - client: TestClient, - db: Session, - user_api_key_header: dict, -): - """Test successful conversation listing by assistant.""" - conversation = get_conversation(db) 
- assistant_id = conversation.assistant_id - - response = client.get( - f"/api/v1/openai-conversation/assistant/{assistant_id}", - headers=user_api_key_header, - ) - - assert response.status_code == 200 - response_data = response.json() - assert response_data["success"] is True - assert isinstance(response_data["data"], list) - # All returned conversations should have the same assistant_id - for conv in response_data["data"]: - assert conv["assistant_id"] == assistant_id - - def test_delete_conversation_success( client: TestClient, db: Session, @@ -387,6 +301,13 @@ def test_delete_conversation_success( assert response_data["success"] is True assert "deleted successfully" in response_data["data"]["message"] + # Verify the conversation is marked as deleted + response = client.get( + f"/api/v1/openai-conversation/{conversation_id}", + headers=user_api_key_header, + ) + assert response.status_code == 404 + def test_delete_conversation_not_found( client: TestClient, diff --git a/backend/app/tests/crud/test_openai_conversation.py b/backend/app/tests/crud/test_openai_conversation.py index 3e2c2554..1d4a3245 100644 --- a/backend/app/tests/crud/test_openai_conversation.py +++ b/backend/app/tests/crud/test_openai_conversation.py @@ -5,15 +5,12 @@ from app.crud.openai_conversation import ( get_conversation_by_id, get_conversation_by_response_id, + get_conversation_by_ancestor_id, get_conversations_by_project, - get_conversations_by_assistant, - get_conversation_thread, create_conversation, - update_conversation, delete_conversation, - upsert_conversation, ) -from app.models import OpenAIConversationCreate, OpenAIConversationUpdate +from app.models import OpenAIConversationCreate from app.tests.utils.conversation import get_conversation from app.tests.utils.utils import get_project, get_organization @@ -31,14 +28,6 @@ def conversation_create_data(): ) -@pytest.fixture -def conversation_update_data(): - return OpenAIConversationUpdate( - response="The capital of France is 
Paris, which is a beautiful city.", - model="gpt-4o-mini", - ) - - def test_create_conversation_success( db: Session, conversation_create_data: OpenAIConversationCreate ): @@ -106,8 +95,8 @@ def test_get_conversation_by_response_id_success(db: Session): ) assert retrieved_conversation is not None - assert retrieved_conversation.response_id == conversation.response_id assert retrieved_conversation.id == conversation.id + assert retrieved_conversation.response_id == conversation.response_id def test_get_conversation_by_response_id_not_found(db: Session): @@ -116,7 +105,55 @@ def test_get_conversation_by_response_id_not_found(db: Session): retrieved_conversation = get_conversation_by_response_id( session=db, - response_id="non_existent_response_id", + response_id="nonexistent_response_id", + project_id=project.id, + ) + + assert retrieved_conversation is None + + +def test_get_conversation_by_ancestor_id_success(db: Session): + """Test successful conversation retrieval by ancestor ID.""" + project = get_project(db) + organization = get_organization(db) + + # Create a conversation with an ancestor + ancestor_response_id = f"resp_{uuid4()}" + conversation_data = OpenAIConversationCreate( + response_id=f"resp_{uuid4()}", + ancestor_response_id=ancestor_response_id, + previous_response_id=None, + user_question="What is the capital of France?", + response="The capital of France is Paris.", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + conversation = create_conversation( + session=db, + conversation=conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + + retrieved_conversation = get_conversation_by_ancestor_id( + session=db, + ancestor_response_id=ancestor_response_id, + project_id=project.id, + ) + + assert retrieved_conversation is not None + assert retrieved_conversation.id == conversation.id + assert retrieved_conversation.ancestor_response_id == ancestor_response_id + + +def test_get_conversation_by_ancestor_id_not_found(db: 
Session): + """Test conversation retrieval by non-existent ancestor ID.""" + project = get_project(db) + + retrieved_conversation = get_conversation_by_ancestor_id( + session=db, + ancestor_response_id="nonexistent_ancestor_id", project_id=project.id, ) @@ -129,10 +166,6 @@ def test_get_conversations_by_project_success(db: Session): organization = get_organization(db) # Create multiple conversations directly - from app.models import OpenAIConversationCreate - from app.crud.openai_conversation import create_conversation - from uuid import uuid4 - for i in range(3): conversation_data = OpenAIConversationCreate( response_id=f"resp_{uuid4()}", @@ -153,8 +186,6 @@ def test_get_conversations_by_project_success(db: Session): conversations = get_conversations_by_project( session=db, project_id=project.id, - skip=0, - limit=10, ) assert len(conversations) >= 3 @@ -165,123 +196,22 @@ def test_get_conversations_by_project_success(db: Session): def test_get_conversations_by_project_with_pagination(db: Session): """Test conversation listing by project with pagination.""" + project = get_project(db) + # Create multiple conversations for _ in range(5): - get_conversation(db) - - project = get_project(db) + get_conversation(db, project_id=project.id) conversations = get_conversations_by_project( session=db, project_id=project.id, - skip=2, + skip=1, limit=2, ) assert len(conversations) <= 2 -def test_get_conversations_by_assistant_success(db: Session): - """Test successful conversation listing by assistant.""" - conversation = get_conversation(db) - project = get_project(db) - - conversations = get_conversations_by_assistant( - session=db, - assistant_id=conversation.assistant_id, - project_id=project.id, - skip=0, - limit=10, - ) - - assert len(conversations) >= 1 - for conv in conversations: - assert conv.assistant_id == conversation.assistant_id - assert conv.project_id == project.id - assert conv.is_deleted is False - - -def test_get_conversations_by_assistant_not_found(db: 
Session): - """Test conversation listing by non-existent assistant.""" - project = get_project(db) - - conversations = get_conversations_by_assistant( - session=db, - assistant_id="non_existent_assistant_id", - project_id=project.id, - skip=0, - limit=10, - ) - - assert len(conversations) == 0 - - -def test_get_conversation_thread_success(db: Session): - """Test successful conversation thread retrieval.""" - conversation = get_conversation(db) - project = get_project(db) - - thread_conversations = get_conversation_thread( - session=db, - response_id=conversation.response_id, - project_id=project.id, - ) - - assert isinstance(thread_conversations, list) - assert len(thread_conversations) >= 1 - assert thread_conversations[0].response_id == conversation.response_id - - -def test_get_conversation_thread_not_found(db: Session): - """Test conversation thread retrieval with non-existent response ID.""" - project = get_project(db) - - thread_conversations = get_conversation_thread( - session=db, - response_id="non_existent_response_id", - project_id=project.id, - ) - - assert isinstance(thread_conversations, list) - assert len(thread_conversations) == 0 - - -def test_update_conversation_success( - db: Session, conversation_update_data: OpenAIConversationUpdate -): - """Test successful conversation update.""" - conversation = get_conversation(db) - project = get_project(db) - - updated_conversation = update_conversation( - session=db, - conversation_id=conversation.id, - project_id=project.id, - conversation_update=conversation_update_data, - ) - - assert updated_conversation is not None - assert updated_conversation.response == conversation_update_data.response - assert updated_conversation.model == conversation_update_data.model - assert updated_conversation.id == conversation.id - - -def test_update_conversation_not_found( - db: Session, conversation_update_data: OpenAIConversationUpdate -): - """Test conversation update with non-existent ID.""" - project = 
get_project(db) - - updated_conversation = update_conversation( - session=db, - conversation_id=99999, - project_id=project.id, - conversation_update=conversation_update_data, - ) - - assert updated_conversation is None - - def test_delete_conversation_success(db: Session): """Test successful conversation deletion.""" conversation = get_conversation(db) @@ -294,9 +224,9 @@ def test_delete_conversation_success(db: Session): ) assert deleted_conversation is not None + assert deleted_conversation.id == conversation.id assert deleted_conversation.is_deleted is True assert deleted_conversation.deleted_at is not None - assert deleted_conversation.id == conversation.id def test_delete_conversation_not_found(db: Session): @@ -312,60 +242,8 @@ def test_delete_conversation_not_found(db: Session): assert deleted_conversation is None -def test_upsert_conversation_create_new( - db: Session, conversation_create_data: OpenAIConversationCreate -): - """Test upsert conversation creates new conversation.""" - project = get_project(db) - organization = get_organization(db) - - conversation = upsert_conversation( - session=db, - conversation=conversation_create_data, - project_id=project.id, - organization_id=organization.id, - ) - - assert conversation is not None - assert conversation.response_id == conversation_create_data.response_id - assert conversation.user_question == conversation_create_data.user_question - - -def test_upsert_conversation_update_existing( - db: Session, conversation_create_data: OpenAIConversationCreate -): - """Test upsert conversation updates existing conversation.""" - project = get_project(db) - organization = get_organization(db) - - # First create a conversation - conversation1 = upsert_conversation( - session=db, - conversation=conversation_create_data, - project_id=project.id, - organization_id=organization.id, - ) - - # Update the data and upsert again - conversation_create_data.response = "Updated response" - conversation_create_data.model = 
"gpt-4o-mini" - - conversation2 = upsert_conversation( - session=db, - conversation=conversation_create_data, - project_id=project.id, - organization_id=organization.id, - ) - - assert conversation2 is not None - assert conversation2.id == conversation1.id # Same conversation - assert conversation2.response == "Updated response" - assert conversation2.model == "gpt-4o-mini" - assert conversation2.response_id == conversation1.response_id - - def test_conversation_soft_delete_behavior(db: Session): - """Test that soft deleted conversations are not returned by queries.""" + """Test that deleted conversations are not returned by get functions.""" conversation = get_conversation(db) project = get_project(db) @@ -376,31 +254,23 @@ def test_conversation_soft_delete_behavior(db: Session): project_id=project.id, ) - # Try to retrieve it by ID + # Verify it's not returned by get functions retrieved_conversation = get_conversation_by_id( session=db, conversation_id=conversation.id, project_id=project.id, ) - assert retrieved_conversation is None - # Try to retrieve it by response ID retrieved_conversation = get_conversation_by_response_id( session=db, response_id=conversation.response_id, project_id=project.id, ) - assert retrieved_conversation is None - # Check that it's not in the project list conversations = get_conversations_by_project( session=db, project_id=project.id, - skip=0, - limit=10, ) - - conversation_ids = [conv.id for conv in conversations] - assert conversation.id not in conversation_ids + assert conversation.id not in [c.id for c in conversations] From 42d1d2c224caaa94545830a14ddbf35cd4e67f6c Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Sun, 27 Jul 2025 22:42:32 +0530 Subject: [PATCH 04/29] removing create endpoint --- backend/app/api/routes/openai_conversation.py | 19 ------ .../api/routes/test_openai_conversation.py | 65 ------------------- .../tests/crud/test_openai_conversation.py | 39 ----------- 3 files changed, 123 deletions(-) diff --git 
a/backend/app/api/routes/openai_conversation.py b/backend/app/api/routes/openai_conversation.py index 10491999..0cbc1425 100644 --- a/backend/app/api/routes/openai_conversation.py +++ b/backend/app/api/routes/openai_conversation.py @@ -9,7 +9,6 @@ get_conversation_by_response_id, get_conversation_by_ancestor_id, get_conversations_by_project, - create_conversation, delete_conversation, ) from app.models import ( @@ -22,24 +21,6 @@ router = APIRouter(prefix="/openai-conversation", tags=["OpenAI Conversations"]) -@router.post("/", response_model=APIResponse[OpenAIConversation], status_code=201) -def create_conversation_route( - conversation_in: OpenAIConversationCreate, - session: Session = Depends(get_db), - current_user: UserProjectOrg = Depends(get_current_user_org_project), -): - """ - Create a new OpenAI conversation in the database. - """ - conversation = create_conversation( - session=session, - conversation=conversation_in, - project_id=current_user.project_id, - organization_id=current_user.organization_id, - ) - return APIResponse.success_response(conversation) - - @router.get( "/{conversation_id}", response_model=APIResponse[OpenAIConversation], diff --git a/backend/app/tests/api/routes/test_openai_conversation.py b/backend/app/tests/api/routes/test_openai_conversation.py index fcd8d587..e28fa9dc 100644 --- a/backend/app/tests/api/routes/test_openai_conversation.py +++ b/backend/app/tests/api/routes/test_openai_conversation.py @@ -7,71 +7,6 @@ from app.tests.utils.conversation import get_conversation -@pytest.fixture -def conversation_create_payload(): - return { - "response_id": f"resp_{uuid4()}", - "ancestor_response_id": None, - "previous_response_id": None, - "user_question": "What is the capital of France?", - "response": "The capital of France is Paris.", - "model": "gpt-4o", - "assistant_id": f"asst_{uuid4()}", - } - - -def test_create_conversation_success( - client: TestClient, - conversation_create_payload: dict, - user_api_key_header: dict, -): - 
"""Test successful conversation creation.""" - response = client.post( - "/api/v1/openai-conversation", - json=conversation_create_payload, - headers=user_api_key_header, - ) - - assert response.status_code == 201 - response_data = response.json() - assert response_data["success"] is True - assert ( - response_data["data"]["response_id"] - == conversation_create_payload["response_id"] - ) - assert ( - response_data["data"]["user_question"] - == conversation_create_payload["user_question"] - ) - assert response_data["data"]["response"] == conversation_create_payload["response"] - assert response_data["data"]["model"] == conversation_create_payload["model"] - assert ( - response_data["data"]["assistant_id"] - == conversation_create_payload["assistant_id"] - ) - - -def test_create_conversation_invalid_data( - client: TestClient, - user_api_key_header: dict, -): - """Test conversation creation with invalid data.""" - invalid_payload = { - "response_id": "", # Empty response_id - "user_question": "", # Empty user_question - "model": "", # Empty model - "assistant_id": "", # Empty assistant_id - } - - response = client.post( - "/api/v1/openai-conversation", - json=invalid_payload, - headers=user_api_key_header, - ) - - assert response.status_code == 422 - - def test_get_conversation_success( client: TestClient, db: Session, diff --git a/backend/app/tests/crud/test_openai_conversation.py b/backend/app/tests/crud/test_openai_conversation.py index 1d4a3245..79f033f7 100644 --- a/backend/app/tests/crud/test_openai_conversation.py +++ b/backend/app/tests/crud/test_openai_conversation.py @@ -15,45 +15,6 @@ from app.tests.utils.utils import get_project, get_organization -@pytest.fixture -def conversation_create_data(): - return OpenAIConversationCreate( - response_id=f"resp_{uuid4()}", - ancestor_response_id=None, - previous_response_id=None, - user_question="What is the capital of France?", - response="The capital of France is Paris.", - model="gpt-4o", - 
assistant_id=f"asst_{uuid4()}", - ) - - -def test_create_conversation_success( - db: Session, conversation_create_data: OpenAIConversationCreate -): - """Test successful conversation creation.""" - project = get_project(db) - organization = get_organization(db) - - conversation = create_conversation( - session=db, - conversation=conversation_create_data, - project_id=project.id, - organization_id=organization.id, - ) - - assert conversation is not None - assert conversation.response_id == conversation_create_data.response_id - assert conversation.user_question == conversation_create_data.user_question - assert conversation.response == conversation_create_data.response - assert conversation.model == conversation_create_data.model - assert conversation.assistant_id == conversation_create_data.assistant_id - assert conversation.project_id == project.id - assert conversation.organization_id == organization.id - assert conversation.is_deleted is False - assert conversation.deleted_at is None - - def test_get_conversation_by_id_success(db: Session): """Test successful conversation retrieval by ID.""" conversation = get_conversation(db) From 7df16e8c67b35411b66306a5f5268af4405ff509 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Sun, 27 Jul 2025 22:57:09 +0530 Subject: [PATCH 05/29] initiating populating data --- backend/app/api/routes/responses.py | 57 ++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index 130a4839..64b7e6b4 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -11,7 +11,8 @@ from app.api.routes.threads import send_callback from app.crud.assistants import get_assistant_by_id from app.crud.credentials import get_provider_credential -from app.models import UserProjectOrg +from app.crud.openai_conversation import create_conversation +from app.models import UserProjectOrg, OpenAIConversationCreate from 
app.utils import APIResponse, mask_string from app.core.langfuse.langfuse import LangfuseTracer @@ -98,6 +99,8 @@ def process_response( assistant, tracer: LangfuseTracer, project_id: int, + organization_id: int, + session: Session, ): """Process a response and send callback with results, with Langfuse tracing.""" logger.info( @@ -166,6 +169,31 @@ def process_response( }, ) + # Create conversation record in database + conversation_data = OpenAIConversationCreate( + response_id=response.id, + previous_response_id=request.response_id, + user_question=request.question, + response=response.output_text, + model=response.model, + assistant_id=request.assistant_id, + ) + + try: + create_conversation( + session=session, + conversation=conversation_data, + project_id=project_id, + organization_id=organization_id, + ) + logger.info( + f"Created conversation record for response_id={response.id}, assistant_id={mask_string(request.assistant_id)}, project_id={project_id}" + ) + except Exception as e: + logger.error( + f"Failed to create conversation record for response_id={response.id}, assistant_id={mask_string(request.assistant_id)}, project_id={project_id}: {str(e)}" + ) + request_dict = request.model_dump() callback_response = ResponsesAPIResponse.success_response( data=_APIResponse( @@ -264,6 +292,8 @@ async def responses( assistant, tracer, project_id, + organization_id, + _session, ) logger.info( @@ -369,6 +399,31 @@ async def responses_sync( }, ) + # Create conversation record in database + conversation_data = OpenAIConversationCreate( + response_id=response.id, + previous_response_id=request.response_id, + user_question=request.question, + response=response.output_text, + model=response.model, + assistant_id="sync_request", # For sync requests, we don't have assistant_id + ) + + try: + create_conversation( + session=_session, + conversation=conversation_data, + project_id=project_id, + organization_id=organization_id, + ) + logger.info( + f"Created conversation record 
for sync response_id={response.id}, project_id={project_id}" + ) + except Exception as e: + logger.error( + f"Failed to create conversation record for sync response_id={response.id}, project_id={project_id}: {str(e)}" + ) + tracer.flush() return ResponsesAPIResponse.success_response( From 9432e0db44cee9b82a1aeb61827950593b167ebd Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Sun, 27 Jul 2025 23:50:37 +0530 Subject: [PATCH 06/29] added lookup logic --- backend/app/api/routes/responses.py | 43 ++++++++++++++++++++++++++++- backend/app/api/routes/threads.py | 2 +- 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index 64b7e6b4..f44acb6b 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -11,7 +11,10 @@ from app.api.routes.threads import send_callback from app.crud.assistants import get_assistant_by_id from app.crud.credentials import get_provider_credential -from app.crud.openai_conversation import create_conversation +from app.crud.openai_conversation import ( + create_conversation, + get_conversation_by_response_id, +) from app.models import UserProjectOrg, OpenAIConversationCreate from app.utils import APIResponse, mask_string from app.core.langfuse.langfuse import LangfuseTracer @@ -169,10 +172,30 @@ def process_response( }, ) + # Determine ancestor_response_id based on previous_response_id + ancestor_response_id = None + if response.previous_response_id is None: + # If previous_response_id is None, then ancestor_response_id = response.id + ancestor_response_id = response.id + else: + # If previous_response_id is not None, look in db for that ID + previous_conversation = get_conversation_by_response_id( + session=session, + response_id=response.previous_response_id, + project_id=project_id, + ) + if previous_conversation: + # If found, use that conversation's ancestor_id + ancestor_response_id = 
previous_conversation.ancestor_response_id
+        else:
+            # If not found, ancestor_response_id = previous_response_id
+            ancestor_response_id = request.response_id
+
     # Create conversation record in database
     conversation_data = OpenAIConversationCreate(
         response_id=response.id,
         previous_response_id=request.response_id,
+        ancestor_response_id=ancestor_response_id,
         user_question=request.question,
         response=response.output_text,
         model=response.model,
@@ -399,10 +422,28 @@
         },
     )
 
+    # Determine ancestor_response_id based on previous_response_id for sync endpoint
+    ancestor_response_id = None
+    if request.response_id is None:
+        # If previous_response_id is None, then ancestor_response_id = response.id
+        ancestor_response_id = response.id
+    else:
+        # If previous_response_id is not None, look in db for that ID
+        previous_conversation = get_conversation_by_response_id(
+            session=_session, response_id=request.response_id, project_id=project_id
+        )
+        if previous_conversation:
+            # If found, use that conversation's ancestor_id
+            ancestor_response_id = previous_conversation.ancestor_response_id
+        else:
+            # If not found, ancestor_response_id = previous_response_id
+            ancestor_response_id = request.response_id
+
     # Create conversation record in database
     conversation_data = OpenAIConversationCreate(
         response_id=response.id,
         previous_response_id=request.response_id,
+        ancestor_response_id=ancestor_response_id,
         user_question=request.question,
         response=response.output_text,
         model=response.model,
diff --git a/backend/app/api/routes/threads.py b/backend/app/api/routes/threads.py
index 90fe0415..1f1d28b5 100644
--- a/backend/app/api/routes/threads.py
+++ b/backend/app/api/routes/threads.py
@@ -39,7 +39,7 @@ def send_callback(callback_url: str, data: dict):
     try:
         session = requests.Session()
         # uncomment this to run locally without SSL
-        # session.verify = False
+        # session.verify = False  # NOTE: leave commented out — disabling TLS certificate verification must never be committed
         response = session.post(callback_url, json=data)
         response.raise_for_status()
         return True 
From c02b35df1cff994da72d57531c0ec5468b86931f Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Mon, 28 Jul 2025 00:31:11 +0530 Subject: [PATCH 07/29] updated business logic and testcases --- backend/app/api/routes/responses.py | 50 ++-- backend/app/crud/openai_conversation.py | 41 +++ .../tests/crud/test_openai_conversation.py | 251 ++++++++++++++++++ 3 files changed, 307 insertions(+), 35 deletions(-) diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index f44acb6b..6f2ba01b 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -13,7 +13,7 @@ from app.crud.credentials import get_provider_credential from app.crud.openai_conversation import ( create_conversation, - get_conversation_by_response_id, + set_ancestor_response_id, ) from app.models import UserProjectOrg, OpenAIConversationCreate from app.utils import APIResponse, mask_string @@ -172,24 +172,13 @@ def process_response( }, ) - # Determine ancestor_response_id based on previous_response_id - ancestor_response_id = None - if response.previous_response_id is None: - # If previous_response_id is None, then ancestor_response_id = response.id - ancestor_response_id = response.id - else: - # If previous_response_id is not None, look in db for that ID - previous_conversation = get_conversation_by_response_id( - session=session, - response_id=response.previous_response_id, - project_id=project_id, - ) - if previous_conversation: - # If found, use that conversation's ancestor_id - ancestor_response_id = previous_conversation.ancestor_response_id - else: - # If not found, ancestor_response_id = previous_response_id - ancestor_response_id = request.response_id + # Set ancestor_response_id using CRUD function + ancestor_response_id = set_ancestor_response_id( + session=session, + current_response_id=response.id, + previous_response_id=response.previous_response_id, + project_id=project_id, + ) # Create conversation record in database 
conversation_data = OpenAIConversationCreate( @@ -422,22 +411,13 @@ async def responses_sync( }, ) - # Determine ancestor_response_id based on previous_response_id for sync endpoint - ancestor_response_id = None - if request.response_id is None: - # If previous_response_id is None, then ancestor_response_id = response.id - ancestor_response_id = response.id - else: - # If previous_response_id is not None, look in db for that ID - previous_conversation = get_conversation_by_response_id( - session=_session, response_id=request.response_id, project_id=project_id - ) - if previous_conversation: - # If found, use that conversation's ancestor_id - ancestor_response_id = previous_conversation.ancestor_response_id - else: - # If not found, ancestor_response_id = previous_response_id - ancestor_response_id = request.response_id + # Set ancestor_response_id using CRUD function for sync endpoint + ancestor_response_id = set_ancestor_response_id( + session=_session, + current_response_id=response.id, + previous_response_id=response.previous_response_id, + project_id=project_id, + ) # Create conversation record in database conversation_data = OpenAIConversationCreate( diff --git a/backend/app/crud/openai_conversation.py b/backend/app/crud/openai_conversation.py index ad072132..02b06abe 100644 --- a/backend/app/crud/openai_conversation.py +++ b/backend/app/crud/openai_conversation.py @@ -52,6 +52,47 @@ def get_conversation_by_ancestor_id( return result +def set_ancestor_response_id( + session: Session, + current_response_id: str, + previous_response_id: Optional[str], + project_id: int, +) -> str: + """ + Set the ancestor_response_id based on previous_response_id. + + Logic: + 1. If previous_response_id is None, then ancestor_response_id = current_response_id + 2. 
If previous_response_id is not None, look in db for that ID + - If found, use that conversation's ancestor_id + - If not found, ancestor_response_id = current_response_id + + Args: + session: Database session + current_response_id: The current response ID + previous_response_id: The previous response ID (can be None) + project_id: The project ID for scoping the search + + Returns: + str: The determined ancestor_response_id + """ + if previous_response_id is None: + # If previous_response_id is None, then ancestor_response_id = current_response_id + return current_response_id + + # If previous_response_id is not None, look in db for that ID + previous_conversation = get_conversation_by_response_id( + session=session, response_id=previous_response_id, project_id=project_id + ) + + if previous_conversation: + # If found, use that conversation's ancestor_id + return previous_conversation.ancestor_response_id + else: + # If not found, ancestor_response_id = previous_response_id + return current_response_id + + def get_conversations_by_project( session: Session, project_id: int, diff --git a/backend/app/tests/crud/test_openai_conversation.py b/backend/app/tests/crud/test_openai_conversation.py index 79f033f7..8fd6dd27 100644 --- a/backend/app/tests/crud/test_openai_conversation.py +++ b/backend/app/tests/crud/test_openai_conversation.py @@ -9,6 +9,7 @@ get_conversations_by_project, create_conversation, delete_conversation, + set_ancestor_response_id, ) from app.models import OpenAIConversationCreate from app.tests.utils.conversation import get_conversation @@ -235,3 +236,253 @@ def test_conversation_soft_delete_behavior(db: Session): project_id=project.id, ) assert conversation.id not in [c.id for c in conversations] + + +def test_set_ancestor_response_id_no_previous_response(db: Session): + """Test set_ancestor_response_id when previous_response_id is None.""" + project = get_project(db) + current_response_id = f"resp_{uuid4()}" + + ancestor_id = 
set_ancestor_response_id( + session=db, + current_response_id=current_response_id, + previous_response_id=None, + project_id=project.id, + ) + + assert ancestor_id == current_response_id + + +def test_set_ancestor_response_id_previous_not_found(db: Session): + """Test set_ancestor_response_id when previous_response_id is not found in DB.""" + project = get_project(db) + current_response_id = f"resp_{uuid4()}" + previous_response_id = f"resp_{uuid4()}" + + ancestor_id = set_ancestor_response_id( + session=db, + current_response_id=current_response_id, + previous_response_id=previous_response_id, + project_id=project.id, + ) + + assert ancestor_id == current_response_id + + +def test_set_ancestor_response_id_previous_found_with_ancestor(db: Session): + """Test set_ancestor_response_id when previous_response_id is found and has an ancestor.""" + project = get_project(db) + organization = get_organization(db) + + # Create a conversation chain: ancestor -> previous -> current + ancestor_response_id = f"resp_{uuid4()}" + + # Create the ancestor conversation + ancestor_conversation_data = OpenAIConversationCreate( + response_id=ancestor_response_id, + ancestor_response_id=ancestor_response_id, # Self-referencing + previous_response_id=None, + user_question="Original question", + response="Original response", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + ancestor_conversation = create_conversation( + session=db, + conversation=ancestor_conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + + # Create the previous conversation + previous_response_id = f"resp_{uuid4()}" + previous_conversation_data = OpenAIConversationCreate( + response_id=previous_response_id, + ancestor_response_id=ancestor_response_id, + previous_response_id=ancestor_response_id, + user_question="Previous question", + response="Previous response", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + previous_conversation = create_conversation( + 
session=db, + conversation=previous_conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + + # Test the current conversation + current_response_id = f"resp_{uuid4()}" + ancestor_id = set_ancestor_response_id( + session=db, + current_response_id=current_response_id, + previous_response_id=previous_response_id, + project_id=project.id, + ) + + assert ancestor_id == ancestor_response_id + + +def test_set_ancestor_response_id_previous_found_without_ancestor(db: Session): + """Test set_ancestor_response_id when previous_response_id is found but has no ancestor.""" + project = get_project(db) + organization = get_organization(db) + + # Create a previous conversation without ancestor + previous_response_id = f"resp_{uuid4()}" + previous_conversation_data = OpenAIConversationCreate( + response_id=previous_response_id, + ancestor_response_id=None, # No ancestor + previous_response_id=None, + user_question="Previous question", + response="Previous response", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + previous_conversation = create_conversation( + session=db, + conversation=previous_conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + + # Test the current conversation + current_response_id = f"resp_{uuid4()}" + ancestor_id = set_ancestor_response_id( + session=db, + current_response_id=current_response_id, + previous_response_id=previous_response_id, + project_id=project.id, + ) + + assert ancestor_id is None + + +def test_set_ancestor_response_id_different_project(db: Session): + """Test set_ancestor_response_id respects project scoping.""" + project1 = get_project(db) + organization = get_organization(db) + + # Create a second project with a different name + from app.models import Project + + project2 = Project( + name=f"test_project_{uuid4()}", + description="Test project for scoping", + is_active=True, + organization_id=organization.id, + ) + db.add(project2) + db.commit() + 
db.refresh(project2) + + # Create a conversation in project1 + previous_response_id = f"resp_{uuid4()}" + previous_conversation_data = OpenAIConversationCreate( + response_id=previous_response_id, + ancestor_response_id=f"ancestor_{uuid4()}", + previous_response_id=None, + user_question="Previous question", + response="Previous response", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + create_conversation( + session=db, + conversation=previous_conversation_data, + project_id=project1.id, + organization_id=organization.id, + ) + + # Test looking for it in project2 (should not find it) + current_response_id = f"resp_{uuid4()}" + ancestor_id = set_ancestor_response_id( + session=db, + current_response_id=current_response_id, + previous_response_id=previous_response_id, + project_id=project2.id, + ) + + # Should return current_response_id since it's not found in project2 + assert ancestor_id == current_response_id + + +def test_set_ancestor_response_id_complex_chain(db: Session): + """Test set_ancestor_response_id with a complex conversation chain.""" + project = get_project(db) + organization = get_organization(db) + + # Create a complex chain: A -> B -> C -> D + # A is the root ancestor + response_a = f"resp_{uuid4()}" + conversation_a_data = OpenAIConversationCreate( + response_id=response_a, + ancestor_response_id=response_a, # Self-referencing + previous_response_id=None, + user_question="Question A", + response="Response A", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + create_conversation( + session=db, + conversation=conversation_a_data, + project_id=project.id, + organization_id=organization.id, + ) + + # B references A + response_b = f"resp_{uuid4()}" + conversation_b_data = OpenAIConversationCreate( + response_id=response_b, + ancestor_response_id=response_a, + previous_response_id=response_a, + user_question="Question B", + response="Response B", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + create_conversation( + 
session=db, + conversation=conversation_b_data, + project_id=project.id, + organization_id=organization.id, + ) + + # C references B + response_c = f"resp_{uuid4()}" + conversation_c_data = OpenAIConversationCreate( + response_id=response_c, + ancestor_response_id=response_a, # Should inherit from B + previous_response_id=response_b, + user_question="Question C", + response="Response C", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + create_conversation( + session=db, + conversation=conversation_c_data, + project_id=project.id, + organization_id=organization.id, + ) + + # Test D referencing C + response_d = f"resp_{uuid4()}" + ancestor_id = set_ancestor_response_id( + session=db, + current_response_id=response_d, + previous_response_id=response_c, + project_id=project.id, + ) + + # Should return response_a (the root ancestor) + assert ancestor_id == response_a From 10ccbb3ca421afb57286f5300c3a4eb688c5fd39 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Mon, 28 Jul 2025 09:44:33 +0530 Subject: [PATCH 08/29] revert few updates --- backend/app/crud/openai_conversation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/app/crud/openai_conversation.py b/backend/app/crud/openai_conversation.py index 02b06abe..36d2f1b8 100644 --- a/backend/app/crud/openai_conversation.py +++ b/backend/app/crud/openai_conversation.py @@ -65,7 +65,7 @@ def set_ancestor_response_id( 1. If previous_response_id is None, then ancestor_response_id = current_response_id 2. 
If previous_response_id is not None, look in db for that ID - If found, use that conversation's ancestor_id - - If not found, ancestor_response_id = current_response_id + - If not found, ancestor_response_id = previous_response_id Args: session: Database session @@ -90,7 +90,7 @@ def set_ancestor_response_id( return previous_conversation.ancestor_response_id else: # If not found, ancestor_response_id = previous_response_id - return current_response_id + return previous_response_id def get_conversations_by_project( From bafa85cf2d6e58aa29d121dedb06e5b33754b430 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Mon, 28 Jul 2025 09:53:03 +0530 Subject: [PATCH 09/29] updated testcases --- .../tests/crud/test_openai_conversation.py | 72 ++++++++++++++++++- 1 file changed, 69 insertions(+), 3 deletions(-) diff --git a/backend/app/tests/crud/test_openai_conversation.py b/backend/app/tests/crud/test_openai_conversation.py index 8fd6dd27..802a25fe 100644 --- a/backend/app/tests/crud/test_openai_conversation.py +++ b/backend/app/tests/crud/test_openai_conversation.py @@ -266,7 +266,8 @@ def test_set_ancestor_response_id_previous_not_found(db: Session): project_id=project.id, ) - assert ancestor_id == current_response_id + # When previous_response_id is not found, should return previous_response_id + assert ancestor_id == previous_response_id def test_set_ancestor_response_id_previous_found_with_ancestor(db: Session): @@ -359,6 +360,7 @@ def test_set_ancestor_response_id_previous_found_without_ancestor(db: Session): project_id=project.id, ) + # When previous conversation has no ancestor, should return None assert ancestor_id is None @@ -408,8 +410,8 @@ def test_set_ancestor_response_id_different_project(db: Session): project_id=project2.id, ) - # Should return current_response_id since it's not found in project2 - assert ancestor_id == current_response_id + # Should return previous_response_id since it's not found in project2 + assert ancestor_id == previous_response_id def 
test_set_ancestor_response_id_complex_chain(db: Session): @@ -486,3 +488,67 @@ def test_set_ancestor_response_id_complex_chain(db: Session): # Should return response_a (the root ancestor) assert ancestor_id == response_a + + +def test_create_conversation_success(db: Session): + """Test successful conversation creation.""" + project = get_project(db) + organization = get_organization(db) + + conversation_data = OpenAIConversationCreate( + response_id=f"resp_{uuid4()}", + ancestor_response_id=None, + previous_response_id=None, + user_question="Test question", + response="Test response", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + conversation = create_conversation( + session=db, + conversation=conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + + assert conversation is not None + assert conversation.response_id == conversation_data.response_id + assert conversation.user_question == conversation_data.user_question + assert conversation.response == conversation_data.response + assert conversation.model == conversation_data.model + assert conversation.assistant_id == conversation_data.assistant_id + assert conversation.project_id == project.id + assert conversation.organization_id == organization.id + assert conversation.is_deleted is False + assert conversation.deleted_at is None + + +def test_create_conversation_with_ancestor(db: Session): + """Test conversation creation with ancestor and previous response IDs.""" + project = get_project(db) + organization = get_organization(db) + + ancestor_response_id = f"resp_{uuid4()}" + previous_response_id = f"resp_{uuid4()}" + + conversation_data = OpenAIConversationCreate( + response_id=f"resp_{uuid4()}", + ancestor_response_id=ancestor_response_id, + previous_response_id=previous_response_id, + user_question="Follow-up question", + response="Follow-up response", + model="gpt-4o", + assistant_id=f"asst_{uuid4()}", + ) + + conversation = create_conversation( + session=db, + 
conversation=conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + + assert conversation is not None + assert conversation.ancestor_response_id == ancestor_response_id + assert conversation.previous_response_id == previous_response_id From 29e7655b1b526c4e0e5bc7c7052482985c2ad4e7 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 09:58:13 +0530 Subject: [PATCH 10/29] fixing testcases --- backend/app/api/routes/responses.py | 14 +- backend/app/models/openai_conversation.py | 7 +- .../app/tests/api/routes/test_responses.py | 390 +++++++++++++++++- .../tests/crud/test_openai_conversation.py | 133 ++++-- .../tests/scripts/test_backend_pre_start.py | 2 +- .../app/tests/scripts/test_test_pre_start.py | 2 +- 6 files changed, 496 insertions(+), 52 deletions(-) diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index 6f2ba01b..9b8cf20e 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -25,8 +25,20 @@ def handle_openai_error(e: openai.OpenAIError) -> str: """Extract error message from OpenAI error.""" - if isinstance(e.body, dict) and "message" in e.body: + # Try to get error message from different possible attributes + if hasattr(e, "body") and isinstance(e.body, dict) and "message" in e.body: return e.body["message"] + elif hasattr(e, "message"): + return e.message + elif hasattr(e, "response") and hasattr(e.response, "json"): + try: + error_data = e.response.json() + if isinstance(error_data, dict) and "error" in error_data: + error_info = error_data["error"] + if isinstance(error_info, dict) and "message" in error_info: + return error_info["message"] + except: + pass return str(e) diff --git a/backend/app/models/openai_conversation.py b/backend/app/models/openai_conversation.py index 07018f0c..6003c720 100644 --- a/backend/app/models/openai_conversation.py +++ b/backend/app/models/openai_conversation.py @@ -7,10 +7,6 @@ from app.core.util import 
now -class OpenAIConversationBase(SQLModel): - response_id: str = Field(index=True, description="OpenAI response ID") - ancestor_response_id: Optional[str] = Field( - default=None, def validate_response_id_pattern(v: str) -> str: """Shared validation function for response ID patterns""" @@ -58,6 +54,7 @@ class OpenAIConversationBase(SQLModel): def validate_response_ids(cls, v): return validate_response_id_pattern(v) + class OpenAIConversation(OpenAIConversationBase, table=True): __tablename__ = "openai_conversation" @@ -111,4 +108,4 @@ class OpenAIConversationPublic(OpenAIConversationBase): class Config: from_attributes = True populate_by_name = True - use_enum_values = True \ No newline at end of file + use_enum_values = True diff --git a/backend/app/tests/api/routes/test_responses.py b/backend/app/tests/api/routes/test_responses.py index aac0a2be..f187e584 100644 --- a/backend/app/tests/api/routes/test_responses.py +++ b/backend/app/tests/api/routes/test_responses.py @@ -1,11 +1,13 @@ -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock, patch, AsyncMock import pytest from fastapi import FastAPI from fastapi.testclient import TestClient from sqlmodel import select +import openai from app.api.routes.responses import router -from app.models import Project +from app.models import Project, Assistant +from app.core.exception_handlers import HTTPException # Wrap the router in a FastAPI app instance app = FastAPI() @@ -15,29 +17,60 @@ @patch("app.api.routes.responses.OpenAI") @patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.get_assistant_by_id") +@patch("app.api.routes.responses.LangfuseTracer") +@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.create_conversation") def test_responses_endpoint_success( - mock_get_credential, mock_openai, db, user_api_key_header: dict[str, str] + mock_create_conversation, + mock_set_ancestor_response_id, + 
mock_tracer_class, + mock_get_assistant, + mock_get_credential, + mock_openai, + db, + user_api_key_header: dict[str, str], ): """Test the /responses endpoint for successful response creation.""" # Setup mock credentials mock_get_credential.return_value = {"api_key": "test_api_key"} + # Setup mock assistant + mock_assistant = MagicMock() + mock_assistant.model = "gpt-4o" + mock_assistant.instructions = "Test instructions" + mock_assistant.temperature = 0.1 + mock_assistant.vector_store_ids = ["vs_test"] + mock_assistant.max_num_results = 20 + mock_get_assistant.return_value = mock_assistant + # Setup mock OpenAI client mock_client = MagicMock() mock_openai.return_value = mock_client - # Setup the mock response object with real values for all used fields + # Setup the mock response object with proper response ID format mock_response = MagicMock() - mock_response.id = "mock_response_id" + mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" mock_response.output_text = "Test output" mock_response.model = "gpt-4o" mock_response.usage.input_tokens = 10 mock_response.usage.output_tokens = 5 mock_response.usage.total_tokens = 15 mock_response.output = [] + mock_response.previous_response_id = None mock_client.responses.create.return_value = mock_response - # Get the Dalgo project ID (the assistant is created for this project) + # Setup mock tracer + mock_tracer = MagicMock() + mock_tracer_class.return_value = mock_tracer + + # Setup mock CRUD functions + mock_set_ancestor_response_id.return_value = ( + "resp_ancestor1234567890abcdef1234567890" + ) + mock_create_conversation.return_value = None + + # Get the Dalgo project ID dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() if not dalgo_project: pytest.skip("Dalgo project not found in the database") @@ -60,7 +93,13 @@ def test_responses_endpoint_success( @patch("app.api.routes.responses.OpenAI") @patch("app.api.routes.responses.get_provider_credential") 
@patch("app.api.routes.responses.get_assistant_by_id") +@patch("app.api.routes.responses.LangfuseTracer") +@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.create_conversation") def test_responses_endpoint_without_vector_store( + mock_create_conversation, + mock_set_ancestor_response_id, + mock_tracer_class, mock_get_assistant, mock_get_credential, mock_openai, @@ -77,23 +116,35 @@ def test_responses_endpoint_without_vector_store( mock_assistant.instructions = "Test instructions" mock_assistant.temperature = 0.1 mock_assistant.vector_store_ids = [] # No vector store configured + mock_assistant.max_num_results = 20 mock_get_assistant.return_value = mock_assistant # Setup mock OpenAI client mock_client = MagicMock() mock_openai.return_value = mock_client - # Setup the mock response object + # Setup the mock response object with proper response ID format mock_response = MagicMock() - mock_response.id = "mock_response_id" + mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" mock_response.output_text = "Test output" mock_response.model = "gpt-4" mock_response.usage.input_tokens = 10 mock_response.usage.output_tokens = 5 mock_response.usage.total_tokens = 15 - # No output attribute since there are no tool calls + mock_response.output = [] + mock_response.previous_response_id = None mock_client.responses.create.return_value = mock_response + # Setup mock tracer + mock_tracer = MagicMock() + mock_tracer_class.return_value = mock_tracer + + # Setup mock CRUD functions + mock_set_ancestor_response_id.return_value = ( + "resp_ancestor1234567890abcdef1234567890" + ) + mock_create_conversation.return_value = None + # Get the Glific project ID glific_project = db.exec(select(Project).where(Project.name == "Glific")).first() if not glific_project: @@ -120,3 +171,324 @@ def test_responses_endpoint_without_vector_store( temperature=mock_assistant.temperature, input=[{"role": "user", "content": "What is Glific?"}], ) + + 
+@patch("app.api.routes.responses.get_assistant_by_id") +def test_responses_endpoint_assistant_not_found( + mock_get_assistant, + db, + user_api_key_header, +): + """Test the /responses endpoint when assistant is not found.""" + # Setup mock assistant to return None (not found) + mock_get_assistant.return_value = None + + request_data = { + "assistant_id": "nonexistent_assistant", + "question": "What is this?", + "callback_url": "http://example.com/callback", + } + + response = client.post("/responses", json=request_data, headers=user_api_key_header) + assert response.status_code == 404 + response_json = response.json() + assert response_json["detail"] == "Assistant not found or not active" + + +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.get_assistant_by_id") +def test_responses_endpoint_no_openai_credentials( + mock_get_assistant, + mock_get_credential, + db, + user_api_key_header, +): + """Test the /responses endpoint when OpenAI credentials are not configured.""" + # Setup mock assistant + mock_assistant = MagicMock() + mock_assistant.model = "gpt-4" + mock_assistant.instructions = "Test instructions" + mock_assistant.temperature = 0.1 + mock_assistant.vector_store_ids = [] + mock_get_assistant.return_value = mock_assistant + + # Setup mock credentials to return None (no credentials) + mock_get_credential.return_value = None + + request_data = { + "assistant_id": "assistant_123", + "question": "What is this?", + "callback_url": "http://example.com/callback", + } + + response = client.post("/responses", json=request_data, headers=user_api_key_header) + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is False + assert "OpenAI API key not configured" in response_json["error"] + + +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.get_assistant_by_id") +def test_responses_endpoint_missing_api_key_in_credentials( + 
mock_get_assistant, + mock_get_credential, + db, + user_api_key_header, +): + """Test the /responses endpoint when credentials exist but don't have api_key.""" + # Setup mock assistant + mock_assistant = MagicMock() + mock_assistant.model = "gpt-4" + mock_assistant.instructions = "Test instructions" + mock_assistant.temperature = 0.1 + mock_assistant.vector_store_ids = [] + mock_get_assistant.return_value = mock_assistant + + # Setup mock credentials without api_key + mock_get_credential.return_value = {"other_key": "value"} + + request_data = { + "assistant_id": "assistant_123", + "question": "What is this?", + "callback_url": "http://example.com/callback", + } + + response = client.post("/responses", json=request_data, headers=user_api_key_header) + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is False + assert "OpenAI API key not configured" in response_json["error"] + + +@patch("app.api.routes.responses.OpenAI") +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.LangfuseTracer") +@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.create_conversation") +def test_responses_sync_endpoint_success( + mock_create_conversation, + mock_set_ancestor_response_id, + mock_tracer_class, + mock_get_credential, + mock_openai, + db, + user_api_key_header, +): + """Test the /responses/sync endpoint for successful response creation.""" + # Setup mock credentials + mock_get_credential.return_value = {"api_key": "test_api_key"} + + # Setup mock OpenAI client + mock_client = MagicMock() + mock_openai.return_value = mock_client + + # Setup the mock response object with proper response ID format + mock_response = MagicMock() + mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" + mock_response.output_text = "Test output" + mock_response.model = "gpt-4o" + mock_response.usage.input_tokens = 10 + 
mock_response.usage.output_tokens = 5 + mock_response.usage.total_tokens = 15 + mock_response.output = [] + mock_response.previous_response_id = None + mock_client.responses.create.return_value = mock_response + + # Setup mock tracer + mock_tracer = MagicMock() + mock_tracer_class.return_value = mock_tracer + + # Setup mock CRUD functions + mock_set_ancestor_response_id.return_value = ( + "resp_ancestor1234567890abcdef1234567890" + ) + mock_create_conversation.return_value = None + + request_data = { + "model": "gpt-4o", + "instructions": "Test instructions", + "vector_store_ids": ["vs_test"], + "max_num_results": 20, + "temperature": 0.1, + "question": "What is this?", + } + + response = client.post( + "/responses/sync", json=request_data, headers=user_api_key_header + ) + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is True + assert response_json["data"]["status"] == "success" + assert ( + response_json["data"]["response_id"] + == "resp_1234567890abcdef1234567890abcdef1234567890" + ) + assert response_json["data"]["message"] == "Test output" + + +@patch("app.api.routes.responses.get_provider_credential") +def test_responses_sync_endpoint_no_openai_credentials( + mock_get_credential, + db, + user_api_key_header, +): + """Test the /responses/sync endpoint when OpenAI credentials are not configured.""" + # Setup mock credentials to return None (no credentials) + mock_get_credential.return_value = None + + request_data = { + "model": "gpt-4o", + "instructions": "Test instructions", + "vector_store_ids": ["vs_test"], + "max_num_results": 20, + "temperature": 0.1, + "question": "What is this?", + } + + response = client.post( + "/responses/sync", json=request_data, headers=user_api_key_header + ) + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is False + assert "OpenAI API key not configured" in response_json["error"] + + 
+@patch("app.api.routes.responses.OpenAI") +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.LangfuseTracer") +def test_responses_sync_endpoint_openai_error( + mock_tracer_class, + mock_get_credential, + mock_openai, + db, + user_api_key_header, +): + """Test the /responses/sync endpoint when OpenAI API returns an error.""" + # Setup mock credentials + mock_get_credential.return_value = {"api_key": "test_api_key"} + + # Setup mock OpenAI client that raises an error + mock_client = MagicMock() + mock_openai.return_value = mock_client + mock_client.responses.create.side_effect = openai.OpenAIError("OpenAI API error") + + # Setup mock tracer + mock_tracer = MagicMock() + mock_tracer_class.return_value = mock_tracer + + request_data = { + "model": "gpt-4o", + "instructions": "Test instructions", + "vector_store_ids": ["vs_test"], + "max_num_results": 20, + "temperature": 0.1, + "question": "What is this?", + } + + response = client.post( + "/responses/sync", json=request_data, headers=user_api_key_header + ) + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is False + assert "OpenAI API error" in response_json["error"] + + +@patch("app.api.routes.responses.OpenAI") +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.get_assistant_by_id") +@patch("app.api.routes.responses.LangfuseTracer") +@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.create_conversation") +def test_responses_endpoint_with_file_search_results( + mock_create_conversation, + mock_set_ancestor_response_id, + mock_tracer_class, + mock_get_assistant, + mock_get_credential, + mock_openai, + db, + user_api_key_header, +): + """Test the /responses endpoint with file search results in the response.""" + # Setup mock credentials + mock_get_credential.return_value = {"api_key": "test_api_key"} + + # Setup mock assistant with 
vector store + mock_assistant = MagicMock() + mock_assistant.model = "gpt-4o" + mock_assistant.instructions = "Test instructions" + mock_assistant.temperature = 0.1 + mock_assistant.vector_store_ids = ["vs_test"] + mock_assistant.max_num_results = 20 + mock_get_assistant.return_value = mock_assistant + + # Setup mock OpenAI client + mock_client = MagicMock() + mock_openai.return_value = mock_client + + # Setup mock file search results + mock_hit1 = MagicMock() + mock_hit1.score = 0.95 + mock_hit1.text = "First search result" + + mock_hit2 = MagicMock() + mock_hit2.score = 0.85 + mock_hit2.text = "Second search result" + + mock_file_search_call = MagicMock() + mock_file_search_call.type = "file_search_call" + mock_file_search_call.results = [mock_hit1, mock_hit2] + + # Setup the mock response object with file search results and proper response ID format + mock_response = MagicMock() + mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" + mock_response.output_text = "Test output with search results" + mock_response.model = "gpt-4o" + mock_response.usage.input_tokens = 10 + mock_response.usage.output_tokens = 5 + mock_response.usage.total_tokens = 15 + mock_response.output = [mock_file_search_call] + mock_response.previous_response_id = None + mock_client.responses.create.return_value = mock_response + + # Setup mock tracer + mock_tracer = MagicMock() + mock_tracer_class.return_value = mock_tracer + + # Setup mock CRUD functions + mock_set_ancestor_response_id.return_value = ( + "resp_ancestor1234567890abcdef1234567890" + ) + mock_create_conversation.return_value = None + + # Get the Dalgo project ID + dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() + if not dalgo_project: + pytest.skip("Dalgo project not found in the database") + + request_data = { + "assistant_id": "assistant_dalgo", + "question": "What is Dalgo?", + "callback_url": "http://example.com/callback", + } + + response = client.post("/responses", 
json=request_data, headers=user_api_key_header) + + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is True + assert response_json["data"]["status"] == "processing" + assert response_json["data"]["message"] == "Response creation started" + + # Verify OpenAI client was called with tools + mock_client.responses.create.assert_called_once() + call_args = mock_client.responses.create.call_args[1] + assert "tools" in call_args + assert call_args["tools"][0]["type"] == "file_search" + assert call_args["tools"][0]["vector_store_ids"] == ["vs_test"] + assert "include" in call_args + assert "file_search_call.results" in call_args["include"] diff --git a/backend/app/tests/crud/test_openai_conversation.py b/backend/app/tests/crud/test_openai_conversation.py index 619f6695..614dcce3 100644 --- a/backend/app/tests/crud/test_openai_conversation.py +++ b/backend/app/tests/crud/test_openai_conversation.py @@ -7,7 +7,7 @@ get_conversation_by_response_id, get_conversation_by_ancestor_id, get_conversations_by_project, - set_ancestor_response_id + set_ancestor_response_id, get_conversations_count_by_project, create_conversation, delete_conversation, @@ -16,6 +16,7 @@ from app.tests.utils.utils import get_project, get_organization from app.tests.utils.openai import generate_openai_id + def test_get_conversation_by_id_success(db: Session): """Test successful conversation retrieval by ID.""" project = get_project(db) @@ -38,7 +39,7 @@ def test_get_conversation_by_id_success(db: Session): project_id=project.id, organization_id=organization.id, ) - + retrieved_conversation = get_conversation_by_id( session=db, conversation_id=conversation.id, @@ -246,7 +247,7 @@ def test_delete_conversation_success(db: Session): project_id=project.id, organization_id=organization.id, ) - + deleted_conversation = delete_conversation( session=db, conversation_id=conversation.id, @@ -323,6 +324,7 @@ def test_conversation_soft_delete_behavior(db: Session): 
) assert conversation.id not in [c.id for c in conversations] + def test_set_ancestor_response_id_no_previous_response(db: Session): """Test set_ancestor_response_id when previous_response_id is None.""" project = get_project(db) @@ -361,7 +363,7 @@ def test_set_ancestor_response_id_previous_found_with_ancestor(db: Session): organization = get_organization(db) # Create a conversation chain: ancestor -> previous -> current - ancestor_response_id = f"resp_{uuid4()}" + ancestor_response_id = generate_openai_id("resp_", 40) # Create the ancestor conversation ancestor_conversation_data = OpenAIConversationCreate( @@ -371,7 +373,7 @@ def test_set_ancestor_response_id_previous_found_with_ancestor(db: Session): user_question="Original question", response="Original response", model="gpt-4o", - assistant_id=f"asst_{uuid4()}", + assistant_id=generate_openai_id("asst_", 20), ) ancestor_conversation = create_conversation( @@ -382,7 +384,7 @@ def test_set_ancestor_response_id_previous_found_with_ancestor(db: Session): ) # Create the previous conversation - previous_response_id = f"resp_{uuid4()}" + previous_response_id = generate_openai_id("resp_", 40) previous_conversation_data = OpenAIConversationCreate( response_id=previous_response_id, ancestor_response_id=ancestor_response_id, @@ -390,12 +392,28 @@ def test_set_ancestor_response_id_previous_found_with_ancestor(db: Session): user_question="Previous question", response="Previous response", model="gpt-4o", - assistant_id=f"asst_{uuid4()}", + assistant_id=generate_openai_id("asst_", 20), ) previous_conversation = create_conversation( session=db, conversation=previous_conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + + # Test the current conversation + current_response_id = f"resp_{uuid4()}" + ancestor_id = set_ancestor_response_id( + session=db, + current_response_id=current_response_id, + previous_response_id=previous_response_id, + project_id=project.id, + ) + + # Should return the 
ancestor_response_id from the previous conversation + assert ancestor_id == ancestor_response_id + def test_get_conversations_count_by_project_success(db: Session): """Test successful conversation count retrieval by project.""" @@ -459,15 +477,15 @@ def test_get_conversations_count_by_project_excludes_deleted(db: Session): ) # Test the current conversation - current_response_id = f"resp_{uuid4()}" + current_response_id = generate_openai_id("resp_", 40) ancestor_id = set_ancestor_response_id( session=db, current_response_id=current_response_id, - previous_response_id=previous_response_id, + previous_response_id=conversation.response_id, project_id=project.id, ) - assert ancestor_id == ancestor_response_id + assert ancestor_id == conversation.ancestor_response_id def test_set_ancestor_response_id_previous_found_without_ancestor(db: Session): @@ -476,15 +494,15 @@ def test_set_ancestor_response_id_previous_found_without_ancestor(db: Session): organization = get_organization(db) # Create a previous conversation without ancestor - previous_response_id = f"resp_{uuid4()}" + previous_response_id = generate_openai_id("resp_", 40) previous_conversation_data = OpenAIConversationCreate( response_id=previous_response_id, - ancestor_response_id=None, # No ancestor + ancestor_response_id=previous_response_id, # Self-referencing for root previous_response_id=None, user_question="Previous question", response="Previous response", model="gpt-4o", - assistant_id=f"asst_{uuid4()}", + assistant_id=generate_openai_id("asst_", 20), ) previous_conversation = create_conversation( @@ -495,7 +513,7 @@ def test_set_ancestor_response_id_previous_found_without_ancestor(db: Session): ) # Test the current conversation - current_response_id = f"resp_{uuid4()}" + current_response_id = generate_openai_id("resp_", 40) ancestor_id = set_ancestor_response_id( session=db, current_response_id=current_response_id, @@ -503,8 +521,8 @@ def test_set_ancestor_response_id_previous_found_without_ancestor(db: 
Session): project_id=project.id, ) - # When previous conversation has no ancestor, should return None - assert ancestor_id is None + # When previous conversation is root (self-referencing), should return the previous_response_id + assert ancestor_id == previous_response_id def test_set_ancestor_response_id_different_project(db: Session): @@ -526,15 +544,15 @@ def test_set_ancestor_response_id_different_project(db: Session): db.refresh(project2) # Create a conversation in project1 - previous_response_id = f"resp_{uuid4()}" + previous_response_id = generate_openai_id("resp_", 40) previous_conversation_data = OpenAIConversationCreate( response_id=previous_response_id, - ancestor_response_id=f"ancestor_{uuid4()}", + ancestor_response_id=generate_openai_id("resp_", 40), previous_response_id=None, user_question="Previous question", response="Previous response", model="gpt-4o", - assistant_id=f"asst_{uuid4()}", + assistant_id=generate_openai_id("asst_", 20), ) create_conversation( @@ -545,7 +563,7 @@ def test_set_ancestor_response_id_different_project(db: Session): ) # Test looking for it in project2 (should not find it) - current_response_id = f"resp_{uuid4()}" + current_response_id = generate_openai_id("resp_", 40) ancestor_id = set_ancestor_response_id( session=db, current_response_id=current_response_id, @@ -564,7 +582,7 @@ def test_set_ancestor_response_id_complex_chain(db: Session): # Create a complex chain: A -> B -> C -> D # A is the root ancestor - response_a = f"resp_{uuid4()}" + response_a = generate_openai_id("resp_", 40) conversation_a_data = OpenAIConversationCreate( response_id=response_a, ancestor_response_id=response_a, # Self-referencing @@ -572,7 +590,7 @@ def test_set_ancestor_response_id_complex_chain(db: Session): user_question="Question A", response="Response A", model="gpt-4o", - assistant_id=f"asst_{uuid4()}", + assistant_id=generate_openai_id("asst_", 20), ) create_conversation( @@ -583,7 +601,7 @@ def 
test_set_ancestor_response_id_complex_chain(db: Session): ) # B references A - response_b = f"resp_{uuid4()}" + response_b = generate_openai_id("resp_", 40) conversation_b_data = OpenAIConversationCreate( response_id=response_b, ancestor_response_id=response_a, @@ -591,7 +609,7 @@ def test_set_ancestor_response_id_complex_chain(db: Session): user_question="Question B", response="Response B", model="gpt-4o", - assistant_id=f"asst_{uuid4()}", + assistant_id=generate_openai_id("asst_", 20), ) create_conversation( @@ -602,7 +620,7 @@ def test_set_ancestor_response_id_complex_chain(db: Session): ) # C references B - response_c = f"resp_{uuid4()}" + response_c = generate_openai_id("resp_", 40) conversation_c_data = OpenAIConversationCreate( response_id=response_c, ancestor_response_id=response_a, # Should inherit from B @@ -610,7 +628,7 @@ def test_set_ancestor_response_id_complex_chain(db: Session): user_question="Question C", response="Response C", model="gpt-4o", - assistant_id=f"asst_{uuid4()}", + assistant_id=generate_openai_id("asst_", 20), ) create_conversation( @@ -621,7 +639,7 @@ def test_set_ancestor_response_id_complex_chain(db: Session): ) # Test D referencing C - response_d = f"resp_{uuid4()}" + response_d = generate_openai_id("resp_", 40) ancestor_id = set_ancestor_response_id( session=db, current_response_id=response_d, @@ -639,9 +657,54 @@ def test_create_conversation_success(db: Session): organization = get_organization(db) conversation_data = OpenAIConversationCreate( - response_id=f"resp_{uuid4()}", - ancestor_response_id=None, - + response_id=generate_openai_id("resp_", 40), + ancestor_response_id=generate_openai_id("resp_", 40), + previous_response_id=None, + user_question="Test question", + response="Test response", + model="gpt-4o", + assistant_id=generate_openai_id("asst_", 20), + ) + + conversation = create_conversation( + session=db, + conversation=conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + + assert 
conversation is not None + assert conversation.response_id == conversation_data.response_id + assert conversation.user_question == conversation_data.user_question + assert conversation.response == conversation_data.response + assert conversation.model == conversation_data.model + assert conversation.project_id == project.id + assert conversation.organization_id == organization.id + + +def test_delete_conversation_excludes_from_count(db: Session): + """Test that deleted conversations are excluded from count.""" + project = get_project(db) + organization = get_organization(db) + + # Create a conversation + conversation_data = OpenAIConversationCreate( + response_id=generate_openai_id("resp_", 40), + ancestor_response_id=generate_openai_id("resp_", 40), + previous_response_id=None, + user_question="Test question", + response="Test response", + model="gpt-4o", + assistant_id=generate_openai_id("asst_", 20), + ) + + conversation = create_conversation( + session=db, + conversation=conversation_data, + project_id=project.id, + organization_id=organization.id, + ) + # Get count before deletion count_before = get_conversations_count_by_project( session=db, @@ -761,17 +824,17 @@ def test_create_conversation_with_ancestor(db: Session): project = get_project(db) organization = get_organization(db) - ancestor_response_id = f"resp_{uuid4()}" - previous_response_id = f"resp_{uuid4()}" + ancestor_response_id = generate_openai_id("resp_", 40) + previous_response_id = generate_openai_id("resp_", 40) conversation_data = OpenAIConversationCreate( - response_id=f"resp_{uuid4()}", + response_id=generate_openai_id("resp_", 40), ancestor_response_id=ancestor_response_id, previous_response_id=previous_response_id, user_question="Follow-up question", response="Follow-up response", model="gpt-4o", - assistant_id=f"asst_{uuid4()}", + assistant_id=generate_openai_id("asst_", 20), ) conversation = create_conversation( @@ -784,7 +847,7 @@ def test_create_conversation_with_ancestor(db: Session): 
assert conversation is not None assert conversation.ancestor_response_id == ancestor_response_id assert conversation.previous_response_id == previous_response_id - assert conversation.response_id == valid_response_id + assert conversation.response_id == conversation_data.response_id # Test invalid response ID (too short) invalid_response_id = "resp_123" diff --git a/backend/app/tests/scripts/test_backend_pre_start.py b/backend/app/tests/scripts/test_backend_pre_start.py index 631690fc..a308d588 100644 --- a/backend/app/tests/scripts/test_backend_pre_start.py +++ b/backend/app/tests/scripts/test_backend_pre_start.py @@ -28,6 +28,6 @@ def test_init_successful_connection() -> None: connection_successful ), "The database connection should be successful and not raise an exception." - assert session_mock.exec.called_once_with( + session_mock.exec.assert_called_once_with( select(1) ), "The session should execute a select statement once." diff --git a/backend/app/tests/scripts/test_test_pre_start.py b/backend/app/tests/scripts/test_test_pre_start.py index a176f380..42063360 100644 --- a/backend/app/tests/scripts/test_test_pre_start.py +++ b/backend/app/tests/scripts/test_test_pre_start.py @@ -28,6 +28,6 @@ def test_init_successful_connection() -> None: connection_successful ), "The database connection should be successful and not raise an exception." - assert session_mock.exec.called_once_with( + session_mock.exec.assert_called_once_with( select(1) ), "The session should execute a select statement once." 
From 6a5333df563690fd2ebf2311d1a003f4618bb932 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 11:10:20 +0530 Subject: [PATCH 11/29] added logic for ancestor lookup --- backend/app/api/routes/responses.py | 27 +- .../app/tests/api/routes/test_responses.py | 559 ++++++++++++++++++ 2 files changed, 584 insertions(+), 2 deletions(-) diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index 9b8cf20e..f8ab0831 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -14,6 +14,7 @@ from app.crud.openai_conversation import ( create_conversation, set_ancestor_response_id, + get_conversation_by_ancestor_id, ) from app.models import UserProjectOrg, OpenAIConversationCreate from app.utils import APIResponse, mask_string @@ -135,9 +136,20 @@ def process_response( ) try: + # Get the latest conversation by ancestor ID to use as previous_response_id + previous_response_id = request.response_id + if request.response_id: + latest_conversation = get_conversation_by_ancestor_id( + session=session, + ancestor_response_id=request.response_id, + project_id=project_id, + ) + if latest_conversation: + previous_response_id = latest_conversation.response_id + params = { "model": assistant.model, - "previous_response_id": request.response_id, + "previous_response_id": previous_response_id, "instructions": assistant.instructions, "temperature": assistant.temperature, "input": [{"role": "user", "content": request.question}], @@ -382,9 +394,20 @@ async def responses_sync( ) try: + # Get the latest conversation by ancestor ID to use as previous_response_id + previous_response_id = request.response_id + if request.response_id: + latest_conversation = get_conversation_by_ancestor_id( + session=_session, + ancestor_response_id=request.response_id, + project_id=project_id, + ) + if latest_conversation: + previous_response_id = latest_conversation.response_id + response = client.responses.create( 
model=request.model, - previous_response_id=request.response_id, + previous_response_id=previous_response_id, instructions=request.instructions, tools=[ { diff --git a/backend/app/tests/api/routes/test_responses.py b/backend/app/tests/api/routes/test_responses.py index f187e584..b827506f 100644 --- a/backend/app/tests/api/routes/test_responses.py +++ b/backend/app/tests/api/routes/test_responses.py @@ -21,7 +21,9 @@ @patch("app.api.routes.responses.LangfuseTracer") @patch("app.api.routes.responses.set_ancestor_response_id") @patch("app.api.routes.responses.create_conversation") +@patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_success( + mock_get_conversation_by_ancestor_id, mock_create_conversation, mock_set_ancestor_response_id, mock_tracer_class, @@ -96,7 +98,9 @@ def test_responses_endpoint_success( @patch("app.api.routes.responses.LangfuseTracer") @patch("app.api.routes.responses.set_ancestor_response_id") @patch("app.api.routes.responses.create_conversation") +@patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_without_vector_store( + mock_get_conversation_by_ancestor_id, mock_create_conversation, mock_set_ancestor_response_id, mock_tracer_class, @@ -266,7 +270,9 @@ def test_responses_endpoint_missing_api_key_in_credentials( @patch("app.api.routes.responses.LangfuseTracer") @patch("app.api.routes.responses.set_ancestor_response_id") @patch("app.api.routes.responses.create_conversation") +@patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_sync_endpoint_success( + mock_get_conversation_by_ancestor_id, mock_create_conversation, mock_set_ancestor_response_id, mock_tracer_class, @@ -403,7 +409,9 @@ def test_responses_sync_endpoint_openai_error( @patch("app.api.routes.responses.LangfuseTracer") @patch("app.api.routes.responses.set_ancestor_response_id") @patch("app.api.routes.responses.create_conversation") 
+@patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_with_file_search_results( + mock_get_conversation_by_ancestor_id, mock_create_conversation, mock_set_ancestor_response_id, mock_tracer_class, @@ -492,3 +500,554 @@ def test_responses_endpoint_with_file_search_results( assert call_args["tools"][0]["vector_store_ids"] == ["vs_test"] assert "include" in call_args assert "file_search_call.results" in call_args["include"] + + +@patch("app.api.routes.responses.OpenAI") +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.get_assistant_by_id") +@patch("app.api.routes.responses.LangfuseTracer") +@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.create_conversation") +@patch("app.api.routes.responses.get_conversation_by_ancestor_id") +def test_responses_endpoint_with_ancestor_conversation_found( + mock_get_conversation_by_ancestor_id, + mock_create_conversation, + mock_set_ancestor_response_id, + mock_tracer_class, + mock_get_assistant, + mock_get_credential, + mock_openai, + db, + user_api_key_header: dict[str, str], +): + """Test the /responses endpoint when a conversation is found by ancestor ID.""" + # Setup mock credentials + mock_get_credential.return_value = {"api_key": "test_api_key"} + + # Setup mock assistant + mock_assistant = MagicMock() + mock_assistant.model = "gpt-4o" + mock_assistant.instructions = "Test instructions" + mock_assistant.temperature = 0.1 + mock_assistant.vector_store_ids = ["vs_test"] + mock_assistant.max_num_results = 20 + mock_get_assistant.return_value = mock_assistant + + # Setup mock OpenAI client + mock_client = MagicMock() + mock_openai.return_value = mock_client + + # Setup the mock response object + mock_response = MagicMock() + mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" + mock_response.output_text = "Test output" + mock_response.model = "gpt-4o" + mock_response.usage.input_tokens 
= 10 + mock_response.usage.output_tokens = 5 + mock_response.usage.total_tokens = 15 + mock_response.output = [] + mock_response.previous_response_id = "resp_ancestor1234567890abcdef1234567890" + mock_client.responses.create.return_value = mock_response + + # Setup mock tracer + mock_tracer = MagicMock() + mock_tracer_class.return_value = mock_tracer + + # Setup mock CRUD functions + mock_set_ancestor_response_id.return_value = ( + "resp_ancestor1234567890abcdef1234567890" + ) + mock_create_conversation.return_value = None + + # Setup mock conversation found by ancestor ID + mock_conversation = MagicMock() + mock_conversation.response_id = "resp_latest1234567890abcdef1234567890" + mock_get_conversation_by_ancestor_id.return_value = mock_conversation + + # Get the Dalgo project ID + dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() + if not dalgo_project: + pytest.skip("Dalgo project not found in the database") + + request_data = { + "assistant_id": "assistant_dalgo", + "question": "What is Dalgo?", + "callback_url": "http://example.com/callback", + "response_id": "resp_ancestor1234567890abcdef1234567890", + } + + response = client.post("/responses", json=request_data, headers=user_api_key_header) + + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is True + assert response_json["data"]["status"] == "processing" + assert response_json["data"]["message"] == "Response creation started" + + # Verify get_conversation_by_ancestor_id was called with correct parameters + mock_get_conversation_by_ancestor_id.assert_called_once() + call_args = mock_get_conversation_by_ancestor_id.call_args + assert ( + call_args[1]["ancestor_response_id"] + == "resp_ancestor1234567890abcdef1234567890" + ) + assert call_args[1]["project_id"] == dalgo_project.id + + # Verify OpenAI client was called with the conversation's response_id as previous_response_id + mock_client.responses.create.assert_called_once() 
+ call_args = mock_client.responses.create.call_args[1] + assert call_args["previous_response_id"] == "resp_latest1234567890abcdef1234567890" + + +@patch("app.api.routes.responses.OpenAI") +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.get_assistant_by_id") +@patch("app.api.routes.responses.LangfuseTracer") +@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.create_conversation") +@patch("app.api.routes.responses.get_conversation_by_ancestor_id") +def test_responses_endpoint_with_ancestor_conversation_not_found( + mock_get_conversation_by_ancestor_id, + mock_create_conversation, + mock_set_ancestor_response_id, + mock_tracer_class, + mock_get_assistant, + mock_get_credential, + mock_openai, + db, + user_api_key_header: dict[str, str], +): + """Test the /responses endpoint when no conversation is found by ancestor ID.""" + # Setup mock credentials + mock_get_credential.return_value = {"api_key": "test_api_key"} + + # Setup mock assistant + mock_assistant = MagicMock() + mock_assistant.model = "gpt-4o" + mock_assistant.instructions = "Test instructions" + mock_assistant.temperature = 0.1 + mock_assistant.vector_store_ids = ["vs_test"] + mock_assistant.max_num_results = 20 + mock_get_assistant.return_value = mock_assistant + + # Setup mock OpenAI client + mock_client = MagicMock() + mock_openai.return_value = mock_client + + # Setup the mock response object + mock_response = MagicMock() + mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" + mock_response.output_text = "Test output" + mock_response.model = "gpt-4o" + mock_response.usage.input_tokens = 10 + mock_response.usage.output_tokens = 5 + mock_response.usage.total_tokens = 15 + mock_response.output = [] + mock_response.previous_response_id = "resp_ancestor1234567890abcdef1234567890" + mock_client.responses.create.return_value = mock_response + + # Setup mock tracer + mock_tracer = MagicMock() + 
mock_tracer_class.return_value = mock_tracer + + # Setup mock CRUD functions + mock_set_ancestor_response_id.return_value = ( + "resp_ancestor1234567890abcdef1234567890" + ) + mock_create_conversation.return_value = None + + # Setup mock conversation not found by ancestor ID + mock_get_conversation_by_ancestor_id.return_value = None + + # Get the Dalgo project ID + dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() + if not dalgo_project: + pytest.skip("Dalgo project not found in the database") + + request_data = { + "assistant_id": "assistant_dalgo", + "question": "What is Dalgo?", + "callback_url": "http://example.com/callback", + "response_id": "resp_ancestor1234567890abcdef1234567890", + } + + response = client.post("/responses", json=request_data, headers=user_api_key_header) + + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is True + assert response_json["data"]["status"] == "processing" + assert response_json["data"]["message"] == "Response creation started" + + # Verify get_conversation_by_ancestor_id was called with correct parameters + mock_get_conversation_by_ancestor_id.assert_called_once() + call_args = mock_get_conversation_by_ancestor_id.call_args + assert ( + call_args[1]["ancestor_response_id"] + == "resp_ancestor1234567890abcdef1234567890" + ) + assert call_args[1]["project_id"] == dalgo_project.id + + # Verify OpenAI client was called with the original response_id as previous_response_id + mock_client.responses.create.assert_called_once() + call_args = mock_client.responses.create.call_args[1] + assert ( + call_args["previous_response_id"] == "resp_ancestor1234567890abcdef1234567890" + ) + + +@patch("app.api.routes.responses.OpenAI") +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.get_assistant_by_id") +@patch("app.api.routes.responses.LangfuseTracer") +@patch("app.api.routes.responses.set_ancestor_response_id") 
+@patch("app.api.routes.responses.create_conversation") +@patch("app.api.routes.responses.get_conversation_by_ancestor_id") +def test_responses_endpoint_without_response_id( + mock_get_conversation_by_ancestor_id, + mock_create_conversation, + mock_set_ancestor_response_id, + mock_tracer_class, + mock_get_assistant, + mock_get_credential, + mock_openai, + db, + user_api_key_header: dict[str, str], +): + """Test the /responses endpoint when no response_id is provided.""" + # Setup mock credentials + mock_get_credential.return_value = {"api_key": "test_api_key"} + + # Setup mock assistant + mock_assistant = MagicMock() + mock_assistant.model = "gpt-4o" + mock_assistant.instructions = "Test instructions" + mock_assistant.temperature = 0.1 + mock_assistant.vector_store_ids = ["vs_test"] + mock_assistant.max_num_results = 20 + mock_get_assistant.return_value = mock_assistant + + # Setup mock OpenAI client + mock_client = MagicMock() + mock_openai.return_value = mock_client + + # Setup the mock response object + mock_response = MagicMock() + mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" + mock_response.output_text = "Test output" + mock_response.model = "gpt-4o" + mock_response.usage.input_tokens = 10 + mock_response.usage.output_tokens = 5 + mock_response.usage.total_tokens = 15 + mock_response.output = [] + mock_response.previous_response_id = None + mock_client.responses.create.return_value = mock_response + + # Setup mock tracer + mock_tracer = MagicMock() + mock_tracer_class.return_value = mock_tracer + + # Setup mock CRUD functions + mock_set_ancestor_response_id.return_value = ( + "resp_1234567890abcdef1234567890abcdef1234567890" + ) + mock_create_conversation.return_value = None + + # Get the Dalgo project ID + dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() + if not dalgo_project: + pytest.skip("Dalgo project not found in the database") + + request_data = { + "assistant_id": "assistant_dalgo", + "question": 
"What is Dalgo?", + "callback_url": "http://example.com/callback", + # No response_id provided + } + + response = client.post("/responses", json=request_data, headers=user_api_key_header) + + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is True + assert response_json["data"]["status"] == "processing" + assert response_json["data"]["message"] == "Response creation started" + + # Verify get_conversation_by_ancestor_id was not called since response_id is None + mock_get_conversation_by_ancestor_id.assert_not_called() + + # Verify OpenAI client was called with None as previous_response_id + mock_client.responses.create.assert_called_once() + call_args = mock_client.responses.create.call_args[1] + assert call_args["previous_response_id"] is None + + +@patch("app.api.routes.responses.OpenAI") +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.LangfuseTracer") +@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.create_conversation") +@patch("app.api.routes.responses.get_conversation_by_ancestor_id") +def test_responses_sync_endpoint_with_ancestor_conversation_found( + mock_get_conversation_by_ancestor_id, + mock_create_conversation, + mock_set_ancestor_response_id, + mock_tracer_class, + mock_get_credential, + mock_openai, + db, + user_api_key_header: dict[str, str], +): + """Test the /responses/sync endpoint when a conversation is found by ancestor ID.""" + # Setup mock credentials + mock_get_credential.return_value = {"api_key": "test_api_key"} + + # Setup mock OpenAI client + mock_client = MagicMock() + mock_openai.return_value = mock_client + + # Setup the mock response object + mock_response = MagicMock() + mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" + mock_response.output_text = "Test output" + mock_response.model = "gpt-4o" + mock_response.usage.input_tokens = 10 + mock_response.usage.output_tokens 
= 5 + mock_response.usage.total_tokens = 15 + mock_response.output = [] + mock_response.previous_response_id = "resp_ancestor1234567890abcdef1234567890" + mock_client.responses.create.return_value = mock_response + + # Setup mock tracer + mock_tracer = MagicMock() + mock_tracer_class.return_value = mock_tracer + + # Setup mock CRUD functions + mock_set_ancestor_response_id.return_value = ( + "resp_ancestor1234567890abcdef1234567890" + ) + mock_create_conversation.return_value = None + + # Setup mock conversation found by ancestor ID + mock_conversation = MagicMock() + mock_conversation.response_id = "resp_latest1234567890abcdef1234567890" + mock_get_conversation_by_ancestor_id.return_value = mock_conversation + + # Get the Dalgo project ID + dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() + if not dalgo_project: + pytest.skip("Dalgo project not found in the database") + + request_data = { + "model": "gpt-4o", + "instructions": "Test instructions", + "vector_store_ids": ["vs_test"], + "max_num_results": 20, + "temperature": 0.1, + "question": "What is Dalgo?", + "response_id": "resp_ancestor1234567890abcdef1234567890", + } + + response = client.post( + "/responses/sync", json=request_data, headers=user_api_key_header + ) + + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is True + assert response_json["data"]["status"] == "success" + assert ( + response_json["data"]["response_id"] + == "resp_1234567890abcdef1234567890abcdef1234567890" + ) + + # Verify get_conversation_by_ancestor_id was called with correct parameters + mock_get_conversation_by_ancestor_id.assert_called_once() + call_args = mock_get_conversation_by_ancestor_id.call_args + assert ( + call_args[1]["ancestor_response_id"] + == "resp_ancestor1234567890abcdef1234567890" + ) + assert call_args[1]["project_id"] == dalgo_project.id + + # Verify OpenAI client was called with the conversation's response_id as 
previous_response_id + mock_client.responses.create.assert_called_once() + call_args = mock_client.responses.create.call_args[1] + assert call_args["previous_response_id"] == "resp_latest1234567890abcdef1234567890" + + +@patch("app.api.routes.responses.OpenAI") +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.LangfuseTracer") +@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.create_conversation") +@patch("app.api.routes.responses.get_conversation_by_ancestor_id") +def test_responses_sync_endpoint_with_ancestor_conversation_not_found( + mock_get_conversation_by_ancestor_id, + mock_create_conversation, + mock_set_ancestor_response_id, + mock_tracer_class, + mock_get_credential, + mock_openai, + db, + user_api_key_header: dict[str, str], +): + """Test the /responses/sync endpoint when no conversation is found by ancestor ID.""" + # Setup mock credentials + mock_get_credential.return_value = {"api_key": "test_api_key"} + + # Setup mock OpenAI client + mock_client = MagicMock() + mock_openai.return_value = mock_client + + # Setup the mock response object + mock_response = MagicMock() + mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" + mock_response.output_text = "Test output" + mock_response.model = "gpt-4o" + mock_response.usage.input_tokens = 10 + mock_response.usage.output_tokens = 5 + mock_response.usage.total_tokens = 15 + mock_response.output = [] + mock_response.previous_response_id = "resp_ancestor1234567890abcdef1234567890" + mock_client.responses.create.return_value = mock_response + + # Setup mock tracer + mock_tracer = MagicMock() + mock_tracer_class.return_value = mock_tracer + + # Setup mock CRUD functions + mock_set_ancestor_response_id.return_value = ( + "resp_ancestor1234567890abcdef1234567890" + ) + mock_create_conversation.return_value = None + + # Setup mock conversation not found by ancestor ID + 
mock_get_conversation_by_ancestor_id.return_value = None + + # Get the Dalgo project ID + dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() + if not dalgo_project: + pytest.skip("Dalgo project not found in the database") + + request_data = { + "model": "gpt-4o", + "instructions": "Test instructions", + "vector_store_ids": ["vs_test"], + "max_num_results": 20, + "temperature": 0.1, + "question": "What is Dalgo?", + "response_id": "resp_ancestor1234567890abcdef1234567890", + } + + response = client.post( + "/responses/sync", json=request_data, headers=user_api_key_header + ) + + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is True + assert response_json["data"]["status"] == "success" + assert ( + response_json["data"]["response_id"] + == "resp_1234567890abcdef1234567890abcdef1234567890" + ) + + # Verify get_conversation_by_ancestor_id was called with correct parameters + mock_get_conversation_by_ancestor_id.assert_called_once() + call_args = mock_get_conversation_by_ancestor_id.call_args + assert ( + call_args[1]["ancestor_response_id"] + == "resp_ancestor1234567890abcdef1234567890" + ) + assert call_args[1]["project_id"] == dalgo_project.id + + # Verify OpenAI client was called with the original response_id as previous_response_id + mock_client.responses.create.assert_called_once() + call_args = mock_client.responses.create.call_args[1] + assert ( + call_args["previous_response_id"] == "resp_ancestor1234567890abcdef1234567890" + ) + + +@patch("app.api.routes.responses.OpenAI") +@patch("app.api.routes.responses.get_provider_credential") +@patch("app.api.routes.responses.LangfuseTracer") +@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.create_conversation") +@patch("app.api.routes.responses.get_conversation_by_ancestor_id") +def test_responses_sync_endpoint_without_response_id( + mock_get_conversation_by_ancestor_id, + 
mock_create_conversation, + mock_set_ancestor_response_id, + mock_tracer_class, + mock_get_credential, + mock_openai, + db, + user_api_key_header: dict[str, str], +): + """Test the /responses/sync endpoint when no response_id is provided.""" + # Setup mock credentials + mock_get_credential.return_value = {"api_key": "test_api_key"} + + # Setup mock OpenAI client + mock_client = MagicMock() + mock_openai.return_value = mock_client + + # Setup the mock response object + mock_response = MagicMock() + mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" + mock_response.output_text = "Test output" + mock_response.model = "gpt-4o" + mock_response.usage.input_tokens = 10 + mock_response.usage.output_tokens = 5 + mock_response.usage.total_tokens = 15 + mock_response.output = [] + mock_response.previous_response_id = None + mock_client.responses.create.return_value = mock_response + + # Setup mock tracer + mock_tracer = MagicMock() + mock_tracer_class.return_value = mock_tracer + + # Setup mock CRUD functions + mock_set_ancestor_response_id.return_value = ( + "resp_1234567890abcdef1234567890abcdef1234567890" + ) + mock_create_conversation.return_value = None + + # Get the Dalgo project ID + dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() + if not dalgo_project: + pytest.skip("Dalgo project not found in the database") + + request_data = { + "model": "gpt-4o", + "instructions": "Test instructions", + "vector_store_ids": ["vs_test"], + "max_num_results": 20, + "temperature": 0.1, + "question": "What is Dalgo?", + # No response_id provided + } + + response = client.post( + "/responses/sync", json=request_data, headers=user_api_key_header + ) + + assert response.status_code == 200 + response_json = response.json() + assert response_json["success"] is True + assert response_json["data"]["status"] == "success" + assert ( + response_json["data"]["response_id"] + == "resp_1234567890abcdef1234567890abcdef1234567890" + ) + + # Verify 
get_conversation_by_ancestor_id was not called since response_id is None + mock_get_conversation_by_ancestor_id.assert_not_called() + + # Verify OpenAI client was called with None as previous_response_id + mock_client.responses.create.assert_called_once() + call_args = mock_client.responses.create.call_args[1] + assert call_args["previous_response_id"] is None From 468a812b78625f529023de4803cb0056e4b744a4 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 11:26:53 +0530 Subject: [PATCH 12/29] added logic for ancestor lookup --- backend/app/tests/utils/conversation.py | 80 - backend/coverage.xml | 8075 +++++++++++++++++++++++ 2 files changed, 8075 insertions(+), 80 deletions(-) delete mode 100644 backend/app/tests/utils/conversation.py create mode 100644 backend/coverage.xml diff --git a/backend/app/tests/utils/conversation.py b/backend/app/tests/utils/conversation.py deleted file mode 100644 index e8363ccd..00000000 --- a/backend/app/tests/utils/conversation.py +++ /dev/null @@ -1,80 +0,0 @@ -from uuid import uuid4 -from sqlmodel import Session, select - -from app.models import OpenAIConversation, OpenAIConversationCreate -from app.crud.openai_conversation import create_conversation - - -def get_conversation( - session: Session, response_id: str | None = None, project_id: int | None = None -) -> OpenAIConversation: - """ - Retrieve an active conversation from the database. - - If a response_id is provided, fetch the active conversation with that response_id. - If a project_id is provided, fetch a conversation from that specific project. - If no response_id or project_id is provided, fetch any random conversation. 
- """ - if response_id: - statement = ( - select(OpenAIConversation) - .where( - OpenAIConversation.response_id == response_id, - OpenAIConversation.is_deleted == False, - ) - .limit(1) - ) - elif project_id: - statement = ( - select(OpenAIConversation) - .where( - OpenAIConversation.project_id == project_id, - OpenAIConversation.is_deleted == False, - ) - .limit(1) - ) - else: - statement = ( - select(OpenAIConversation) - .where(OpenAIConversation.is_deleted == False) - .limit(1) - ) - - conversation = session.exec(statement).first() - - if not conversation: - # Create a new conversation if none exists - from app.tests.utils.utils import get_project, get_organization - - if project_id: - # Get the specific project - from app.models import Project - - project = session.exec( - select(Project).where(Project.id == project_id) - ).first() - if not project: - raise ValueError(f"Project with ID {project_id} not found") - else: - project = get_project(session) - - organization = get_organization(session) - - conversation_data = OpenAIConversationCreate( - response_id=f"resp_{uuid4()}", - ancestor_response_id=None, - previous_response_id=None, - user_question="Test question", - response="Test response", - model="gpt-4o", - assistant_id=f"asst_{uuid4()}", - ) - - conversation = create_conversation( - session=session, - conversation=conversation_data, - project_id=project.id, - organization_id=organization.id, - ) - - return conversation diff --git a/backend/coverage.xml b/backend/coverage.xml new file mode 100644 index 00000000..2813a93b --- /dev/null +++ b/backend/coverage.xml @@ -0,0 +1,8075 @@ + + + + + + /Users/akhileshnegi/Projects/ai-platform/backend + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 1fd09b563a69ff9d60e615caaa5c9503086fc50b Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 11:28:23 +0530 Subject: [PATCH 13/29] removed xml file --- 
backend/coverage.xml | 8075 ------------------------------------------ 1 file changed, 8075 deletions(-) delete mode 100644 backend/coverage.xml diff --git a/backend/coverage.xml b/backend/coverage.xml deleted file mode 100644 index 2813a93b..00000000 --- a/backend/coverage.xml +++ /dev/null @@ -1,8075 +0,0 @@ - - - - - - /Users/akhileshnegi/Projects/ai-platform/backend - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - From 751dbe5de27b57c6676de7b8e1438899f3324823 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 11:34:30 +0530 Subject: [PATCH 14/29] cleanups --- backend/app/api/routes/threads.py | 2 +- backend/app/crud/openai_conversation.py | 4 +++- backend/app/tests/api/routes/test_openai_conversation.py | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/backend/app/api/routes/threads.py b/backend/app/api/routes/threads.py index 1f1d28b5..90fe0415 100644 --- a/backend/app/api/routes/threads.py +++ b/backend/app/api/routes/threads.py @@ -39,7 +39,7 @@ def send_callback(callback_url: str, data: dict): try: session = requests.Session() # uncomment this to run locally without SSL - session.verify = False + # session.verify = False response = session.post(callback_url, json=data) response.raise_for_status() return True diff --git a/backend/app/crud/openai_conversation.py b/backend/app/crud/openai_conversation.py index d12de82a..39f84f29 100644 --- a/backend/app/crud/openai_conversation.py +++ b/backend/app/crud/openai_conversation.py @@ -1,5 +1,5 @@ import logging -from typing import List, Optional +from typing import Optional from sqlmodel import Session, select, func from app.models import OpenAIConversation, OpenAIConversationCreate @@ -57,6 +57,7 @@ def get_conversation_by_ancestor_id( result = session.exec(statement).first() return result + def set_ancestor_response_id( session: Session, current_response_id: str, @@ -97,6 +98,7 @@ def 
set_ancestor_response_id( # If not found, ancestor_response_id = previous_response_id return previous_response_id + def get_conversations_count_by_project( session: Session, project_id: int, diff --git a/backend/app/tests/api/routes/test_openai_conversation.py b/backend/app/tests/api/routes/test_openai_conversation.py index 84170888..1d0123d9 100644 --- a/backend/app/tests/api/routes/test_openai_conversation.py +++ b/backend/app/tests/api/routes/test_openai_conversation.py @@ -100,7 +100,6 @@ def test_get_conversation_by_response_id_success( def test_get_conversation_by_response_id_not_found( client: TestClient, user_api_key: APIKeyPublic, - ): """Test conversation retrieval with non-existent response ID.""" response = client.get( @@ -469,6 +468,7 @@ def test_delete_conversation_not_found( response_data = response.json() assert "not found" in response_data["error"] + def test_get_conversation_unauthorized_no_api_key( client: TestClient, db: Session, @@ -529,4 +529,4 @@ def test_delete_conversation_unauthorized_invalid_api_key( "/api/v1/openai-conversation/1", headers={"X-API-KEY": "invalid_api_key"}, ) - assert response.status_code == 401 \ No newline at end of file + assert response.status_code == 401 From e69cc32250a4d12d85562ea49f36105df620378e Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 11:42:52 +0530 Subject: [PATCH 15/29] coderabbit cleanups --- backend/app/crud/openai_conversation.py | 2 +- backend/app/tests/api/routes/test_openai_conversation.py | 3 --- backend/app/tests/scripts/test_backend_pre_start.py | 2 +- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/backend/app/crud/openai_conversation.py b/backend/app/crud/openai_conversation.py index 39f84f29..efeaccc6 100644 --- a/backend/app/crud/openai_conversation.py +++ b/backend/app/crud/openai_conversation.py @@ -61,7 +61,7 @@ def get_conversation_by_ancestor_id( def set_ancestor_response_id( session: Session, current_response_id: str, - previous_response_id: 
Optional[str], + previous_response_id: str | None, project_id: int, ) -> str: """ diff --git a/backend/app/tests/api/routes/test_openai_conversation.py b/backend/app/tests/api/routes/test_openai_conversation.py index 1d0123d9..dafc569f 100644 --- a/backend/app/tests/api/routes/test_openai_conversation.py +++ b/backend/app/tests/api/routes/test_openai_conversation.py @@ -1,7 +1,4 @@ -import pytest -from uuid import uuid4 from sqlmodel import Session -from fastapi import HTTPException from fastapi.testclient import TestClient from app.crud.openai_conversation import create_conversation diff --git a/backend/app/tests/scripts/test_backend_pre_start.py b/backend/app/tests/scripts/test_backend_pre_start.py index 732b8af2..9b134c3c 100644 --- a/backend/app/tests/scripts/test_backend_pre_start.py +++ b/backend/app/tests/scripts/test_backend_pre_start.py @@ -20,4 +20,4 @@ def test_init_success(): assert ( connection_successful ), "The database connection should be successful and not raise an exception." 
- mock_session.exec.assert_called_once_with(fake_select) \ No newline at end of file + mock_session.exec.assert_called_once_with(fake_select) From a796c3c8205bcb14faba8900766aac0b6924949a Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 12:37:13 +0530 Subject: [PATCH 16/29] consistency updating db entry --- backend/app/api/routes/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index f8ab0831..2cb4b108 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -207,7 +207,7 @@ def process_response( # Create conversation record in database conversation_data = OpenAIConversationCreate( response_id=response.id, - previous_response_id=request.response_id, + previous_response_id=response.previous_response_id, ancestor_response_id=ancestor_response_id, user_question=request.question, response=response.output_text, From 6b7b55922e7a3d39b02063241f9aef1045fc3fc3 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 13:56:28 +0530 Subject: [PATCH 17/29] renaming function --- backend/app/api/routes/openai_conversation.py | 3 - backend/app/api/routes/responses.py | 6 +- backend/app/crud/openai_conversation.py | 2 +- .../app/tests/api/routes/test_responses.py | 60 +++++++++---------- .../tests/crud/test_openai_conversation.py | 40 ++++++------- 5 files changed, 54 insertions(+), 57 deletions(-) diff --git a/backend/app/api/routes/openai_conversation.py b/backend/app/api/routes/openai_conversation.py index dad34a43..71f0c730 100644 --- a/backend/app/api/routes/openai_conversation.py +++ b/backend/app/api/routes/openai_conversation.py @@ -10,13 +10,10 @@ get_conversation_by_ancestor_id, get_conversations_by_project, get_conversations_count_by_project, - create_conversation, delete_conversation, ) from app.models import ( UserProjectOrg, - OpenAIConversationCreate, - OpenAIConversation, OpenAIConversationPublic, 
) from app.utils import APIResponse diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index 2cb4b108..6e37cdd9 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -13,7 +13,7 @@ from app.crud.credentials import get_provider_credential from app.crud.openai_conversation import ( create_conversation, - set_ancestor_response_id, + get_ancestor_id_from_response, get_conversation_by_ancestor_id, ) from app.models import UserProjectOrg, OpenAIConversationCreate @@ -197,7 +197,7 @@ def process_response( ) # Set ancestor_response_id using CRUD function - ancestor_response_id = set_ancestor_response_id( + ancestor_response_id = get_ancestor_id_from_response( session=session, current_response_id=response.id, previous_response_id=response.previous_response_id, @@ -447,7 +447,7 @@ async def responses_sync( ) # Set ancestor_response_id using CRUD function for sync endpoint - ancestor_response_id = set_ancestor_response_id( + ancestor_response_id = get_ancestor_id_from_response( session=_session, current_response_id=response.id, previous_response_id=response.previous_response_id, diff --git a/backend/app/crud/openai_conversation.py b/backend/app/crud/openai_conversation.py index efeaccc6..7ef127b4 100644 --- a/backend/app/crud/openai_conversation.py +++ b/backend/app/crud/openai_conversation.py @@ -58,7 +58,7 @@ def get_conversation_by_ancestor_id( return result -def set_ancestor_response_id( +def get_ancestor_id_from_response( session: Session, current_response_id: str, previous_response_id: str | None, diff --git a/backend/app/tests/api/routes/test_responses.py b/backend/app/tests/api/routes/test_responses.py index b827506f..9316d077 100644 --- a/backend/app/tests/api/routes/test_responses.py +++ b/backend/app/tests/api/routes/test_responses.py @@ -19,13 +19,13 @@ @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.get_assistant_by_id") 
@patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.get_ancestor_id_from_response") @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_success( mock_get_conversation_by_ancestor_id, mock_create_conversation, - mock_set_ancestor_response_id, + mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_assistant, mock_get_credential, @@ -67,7 +67,7 @@ def test_responses_endpoint_success( mock_tracer_class.return_value = mock_tracer # Setup mock CRUD functions - mock_set_ancestor_response_id.return_value = ( + mock_get_ancestor_id_from_response.return_value = ( "resp_ancestor1234567890abcdef1234567890" ) mock_create_conversation.return_value = None @@ -96,13 +96,13 @@ def test_responses_endpoint_success( @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.get_assistant_by_id") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.get_ancestor_id_from_response") @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_without_vector_store( mock_get_conversation_by_ancestor_id, mock_create_conversation, - mock_set_ancestor_response_id, + mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_assistant, mock_get_credential, @@ -144,7 +144,7 @@ def test_responses_endpoint_without_vector_store( mock_tracer_class.return_value = mock_tracer # Setup mock CRUD functions - mock_set_ancestor_response_id.return_value = ( + mock_get_ancestor_id_from_response.return_value = ( "resp_ancestor1234567890abcdef1234567890" ) mock_create_conversation.return_value = None @@ -268,13 +268,13 @@ def test_responses_endpoint_missing_api_key_in_credentials( 
@patch("app.api.routes.responses.OpenAI") @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.get_ancestor_id_from_response") @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_sync_endpoint_success( mock_get_conversation_by_ancestor_id, mock_create_conversation, - mock_set_ancestor_response_id, + mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_credential, mock_openai, @@ -306,7 +306,7 @@ def test_responses_sync_endpoint_success( mock_tracer_class.return_value = mock_tracer # Setup mock CRUD functions - mock_set_ancestor_response_id.return_value = ( + mock_get_ancestor_id_from_response.return_value = ( "resp_ancestor1234567890abcdef1234567890" ) mock_create_conversation.return_value = None @@ -407,13 +407,13 @@ def test_responses_sync_endpoint_openai_error( @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.get_assistant_by_id") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.get_ancestor_id_from_response") @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_with_file_search_results( mock_get_conversation_by_ancestor_id, mock_create_conversation, - mock_set_ancestor_response_id, + mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_assistant, mock_get_credential, @@ -468,7 +468,7 @@ def test_responses_endpoint_with_file_search_results( mock_tracer_class.return_value = mock_tracer # Setup mock CRUD functions - mock_set_ancestor_response_id.return_value = ( + mock_get_ancestor_id_from_response.return_value = ( "resp_ancestor1234567890abcdef1234567890" ) 
mock_create_conversation.return_value = None @@ -506,13 +506,13 @@ def test_responses_endpoint_with_file_search_results( @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.get_assistant_by_id") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.get_ancestor_id_from_response") @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_with_ancestor_conversation_found( mock_get_conversation_by_ancestor_id, mock_create_conversation, - mock_set_ancestor_response_id, + mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_assistant, mock_get_credential, @@ -554,7 +554,7 @@ def test_responses_endpoint_with_ancestor_conversation_found( mock_tracer_class.return_value = mock_tracer # Setup mock CRUD functions - mock_set_ancestor_response_id.return_value = ( + mock_get_ancestor_id_from_response.return_value = ( "resp_ancestor1234567890abcdef1234567890" ) mock_create_conversation.return_value = None @@ -603,13 +603,13 @@ def test_responses_endpoint_with_ancestor_conversation_found( @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.get_assistant_by_id") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.get_ancestor_id_from_response") @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_with_ancestor_conversation_not_found( mock_get_conversation_by_ancestor_id, mock_create_conversation, - mock_set_ancestor_response_id, + mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_assistant, mock_get_credential, @@ -651,7 +651,7 @@ def test_responses_endpoint_with_ancestor_conversation_not_found( 
mock_tracer_class.return_value = mock_tracer # Setup mock CRUD functions - mock_set_ancestor_response_id.return_value = ( + mock_get_ancestor_id_from_response.return_value = ( "resp_ancestor1234567890abcdef1234567890" ) mock_create_conversation.return_value = None @@ -700,13 +700,13 @@ def test_responses_endpoint_with_ancestor_conversation_not_found( @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.get_assistant_by_id") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.get_ancestor_id_from_response") @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_without_response_id( mock_get_conversation_by_ancestor_id, mock_create_conversation, - mock_set_ancestor_response_id, + mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_assistant, mock_get_credential, @@ -748,7 +748,7 @@ def test_responses_endpoint_without_response_id( mock_tracer_class.return_value = mock_tracer # Setup mock CRUD functions - mock_set_ancestor_response_id.return_value = ( + mock_get_ancestor_id_from_response.return_value = ( "resp_1234567890abcdef1234567890abcdef1234567890" ) mock_create_conversation.return_value = None @@ -785,13 +785,13 @@ def test_responses_endpoint_without_response_id( @patch("app.api.routes.responses.OpenAI") @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.get_ancestor_id_from_response") @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_sync_endpoint_with_ancestor_conversation_found( mock_get_conversation_by_ancestor_id, mock_create_conversation, - mock_set_ancestor_response_id, + 
mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_credential, mock_openai, @@ -823,7 +823,7 @@ def test_responses_sync_endpoint_with_ancestor_conversation_found( mock_tracer_class.return_value = mock_tracer # Setup mock CRUD functions - mock_set_ancestor_response_id.return_value = ( + mock_get_ancestor_id_from_response.return_value = ( "resp_ancestor1234567890abcdef1234567890" ) mock_create_conversation.return_value = None @@ -879,13 +879,13 @@ def test_responses_sync_endpoint_with_ancestor_conversation_found( @patch("app.api.routes.responses.OpenAI") @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.get_ancestor_id_from_response") @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_sync_endpoint_with_ancestor_conversation_not_found( mock_get_conversation_by_ancestor_id, mock_create_conversation, - mock_set_ancestor_response_id, + mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_credential, mock_openai, @@ -917,7 +917,7 @@ def test_responses_sync_endpoint_with_ancestor_conversation_not_found( mock_tracer_class.return_value = mock_tracer # Setup mock CRUD functions - mock_set_ancestor_response_id.return_value = ( + mock_get_ancestor_id_from_response.return_value = ( "resp_ancestor1234567890abcdef1234567890" ) mock_create_conversation.return_value = None @@ -973,13 +973,13 @@ def test_responses_sync_endpoint_with_ancestor_conversation_not_found( @patch("app.api.routes.responses.OpenAI") @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.set_ancestor_response_id") +@patch("app.api.routes.responses.get_ancestor_id_from_response") @patch("app.api.routes.responses.create_conversation") 
@patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_sync_endpoint_without_response_id( mock_get_conversation_by_ancestor_id, mock_create_conversation, - mock_set_ancestor_response_id, + mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_credential, mock_openai, @@ -1011,7 +1011,7 @@ def test_responses_sync_endpoint_without_response_id( mock_tracer_class.return_value = mock_tracer # Setup mock CRUD functions - mock_set_ancestor_response_id.return_value = ( + mock_get_ancestor_id_from_response.return_value = ( "resp_1234567890abcdef1234567890abcdef1234567890" ) mock_create_conversation.return_value = None diff --git a/backend/app/tests/crud/test_openai_conversation.py b/backend/app/tests/crud/test_openai_conversation.py index 614dcce3..155d13b2 100644 --- a/backend/app/tests/crud/test_openai_conversation.py +++ b/backend/app/tests/crud/test_openai_conversation.py @@ -7,7 +7,7 @@ get_conversation_by_response_id, get_conversation_by_ancestor_id, get_conversations_by_project, - set_ancestor_response_id, + get_ancestor_id_from_response, get_conversations_count_by_project, create_conversation, delete_conversation, @@ -325,12 +325,12 @@ def test_conversation_soft_delete_behavior(db: Session): assert conversation.id not in [c.id for c in conversations] -def test_set_ancestor_response_id_no_previous_response(db: Session): - """Test set_ancestor_response_id when previous_response_id is None.""" +def test_get_ancestor_id_from_response_no_previous_response(db: Session): + """Test get_ancestor_id_from_response when previous_response_id is None.""" project = get_project(db) current_response_id = f"resp_{uuid4()}" - ancestor_id = set_ancestor_response_id( + ancestor_id = get_ancestor_id_from_response( session=db, current_response_id=current_response_id, previous_response_id=None, @@ -340,13 +340,13 @@ def test_set_ancestor_response_id_no_previous_response(db: Session): assert ancestor_id == current_response_id -def 
test_set_ancestor_response_id_previous_not_found(db: Session): - """Test set_ancestor_response_id when previous_response_id is not found in DB.""" +def test_get_ancestor_id_from_response_previous_not_found(db: Session): + """Test get_ancestor_id_from_response when previous_response_id is not found in DB.""" project = get_project(db) current_response_id = f"resp_{uuid4()}" previous_response_id = f"resp_{uuid4()}" - ancestor_id = set_ancestor_response_id( + ancestor_id = get_ancestor_id_from_response( session=db, current_response_id=current_response_id, previous_response_id=previous_response_id, @@ -357,8 +357,8 @@ def test_set_ancestor_response_id_previous_not_found(db: Session): assert ancestor_id == previous_response_id -def test_set_ancestor_response_id_previous_found_with_ancestor(db: Session): - """Test set_ancestor_response_id when previous_response_id is found and has an ancestor.""" +def test_get_ancestor_id_from_response_previous_found_with_ancestor(db: Session): + """Test get_ancestor_id_from_response when previous_response_id is found and has an ancestor.""" project = get_project(db) organization = get_organization(db) @@ -404,7 +404,7 @@ def test_set_ancestor_response_id_previous_found_with_ancestor(db: Session): # Test the current conversation current_response_id = f"resp_{uuid4()}" - ancestor_id = set_ancestor_response_id( + ancestor_id = get_ancestor_id_from_response( session=db, current_response_id=current_response_id, previous_response_id=previous_response_id, @@ -478,7 +478,7 @@ def test_get_conversations_count_by_project_excludes_deleted(db: Session): # Test the current conversation current_response_id = generate_openai_id("resp_", 40) - ancestor_id = set_ancestor_response_id( + ancestor_id = get_ancestor_id_from_response( session=db, current_response_id=current_response_id, previous_response_id=conversation.response_id, @@ -488,8 +488,8 @@ def test_get_conversations_count_by_project_excludes_deleted(db: Session): assert ancestor_id == 
conversation.ancestor_response_id -def test_set_ancestor_response_id_previous_found_without_ancestor(db: Session): - """Test set_ancestor_response_id when previous_response_id is found but has no ancestor.""" +def test_get_ancestor_id_from_response_previous_found_without_ancestor(db: Session): + """Test get_ancestor_id_from_response when previous_response_id is found but has no ancestor.""" project = get_project(db) organization = get_organization(db) @@ -514,7 +514,7 @@ def test_set_ancestor_response_id_previous_found_without_ancestor(db: Session): # Test the current conversation current_response_id = generate_openai_id("resp_", 40) - ancestor_id = set_ancestor_response_id( + ancestor_id = get_ancestor_id_from_response( session=db, current_response_id=current_response_id, previous_response_id=previous_response_id, @@ -525,8 +525,8 @@ def test_set_ancestor_response_id_previous_found_without_ancestor(db: Session): assert ancestor_id == previous_response_id -def test_set_ancestor_response_id_different_project(db: Session): - """Test set_ancestor_response_id respects project scoping.""" +def test_get_ancestor_id_from_response_different_project(db: Session): + """Test get_ancestor_id_from_response respects project scoping.""" project1 = get_project(db) organization = get_organization(db) @@ -564,7 +564,7 @@ def test_set_ancestor_response_id_different_project(db: Session): # Test looking for it in project2 (should not find it) current_response_id = generate_openai_id("resp_", 40) - ancestor_id = set_ancestor_response_id( + ancestor_id = get_ancestor_id_from_response( session=db, current_response_id=current_response_id, previous_response_id=previous_response_id, @@ -575,8 +575,8 @@ def test_set_ancestor_response_id_different_project(db: Session): assert ancestor_id == previous_response_id -def test_set_ancestor_response_id_complex_chain(db: Session): - """Test set_ancestor_response_id with a complex conversation chain.""" +def 
test_get_ancestor_id_from_response_complex_chain(db: Session): + """Test get_ancestor_id_from_response with a complex conversation chain.""" project = get_project(db) organization = get_organization(db) @@ -640,7 +640,7 @@ def test_set_ancestor_response_id_complex_chain(db: Session): # Test D referencing C response_d = generate_openai_id("resp_", 40) - ancestor_id = set_ancestor_response_id( + ancestor_id = get_ancestor_id_from_response( session=db, current_response_id=response_d, previous_response_id=response_c, From afb0ce7695f4e422cedc6b57ee90e08a20508517 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 14:07:10 +0530 Subject: [PATCH 18/29] cleanups based on review --- backend/app/api/routes/responses.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index 6e37cdd9..6acb6394 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -215,20 +215,12 @@ def process_response( assistant_id=request.assistant_id, ) - try: - create_conversation( - session=session, - conversation=conversation_data, - project_id=project_id, - organization_id=organization_id, - ) - logger.info( - f"Created conversation record for response_id={response.id}, assistant_id={mask_string(request.assistant_id)}, project_id={project_id}" - ) - except Exception as e: - logger.error( - f"Failed to create conversation record for response_id={response.id}, assistant_id={mask_string(request.assistant_id)}, project_id={project_id}: {str(e)}" - ) + create_conversation( + session=session, + conversation=conversation_data, + project_id=project_id, + organization_id=organization_id, + ) request_dict = request.model_dump() callback_response = ResponsesAPIResponse.success_response( From 947546e7776caf7cf6d51208584b9cff83969816 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 14:15:41 +0530 Subject: [PATCH 19/29] cleanup 
testcases --- .../tests/crud/test_openai_conversation.py | 49 +++---------------- 1 file changed, 7 insertions(+), 42 deletions(-) diff --git a/backend/app/tests/crud/test_openai_conversation.py b/backend/app/tests/crud/test_openai_conversation.py index 155d13b2..890041b0 100644 --- a/backend/app/tests/crud/test_openai_conversation.py +++ b/backend/app/tests/crud/test_openai_conversation.py @@ -328,7 +328,7 @@ def test_conversation_soft_delete_behavior(db: Session): def test_get_ancestor_id_from_response_no_previous_response(db: Session): """Test get_ancestor_id_from_response when previous_response_id is None.""" project = get_project(db) - current_response_id = f"resp_{uuid4()}" + current_response_id = generate_openai_id("resp_", 40) ancestor_id = get_ancestor_id_from_response( session=db, @@ -343,8 +343,8 @@ def test_get_ancestor_id_from_response_no_previous_response(db: Session): def test_get_ancestor_id_from_response_previous_not_found(db: Session): """Test get_ancestor_id_from_response when previous_response_id is not found in DB.""" project = get_project(db) - current_response_id = f"resp_{uuid4()}" - previous_response_id = f"resp_{uuid4()}" + current_response_id = generate_openai_id("resp_", 40) + previous_response_id = generate_openai_id("resp_", 40) ancestor_id = get_ancestor_id_from_response( session=db, @@ -395,7 +395,7 @@ def test_get_ancestor_id_from_response_previous_found_with_ancestor(db: Session) assistant_id=generate_openai_id("asst_", 20), ) - previous_conversation = create_conversation( + create_conversation( session=db, conversation=previous_conversation_data, project_id=project.id, @@ -403,7 +403,7 @@ def test_get_ancestor_id_from_response_previous_found_with_ancestor(db: Session) ) # Test the current conversation - current_response_id = f"resp_{uuid4()}" + current_response_id = generate_openai_id("resp_", 40) ancestor_id = get_ancestor_id_from_response( session=db, current_response_id=current_response_id, @@ -453,47 +453,12 @@ def 
test_get_conversations_count_by_project_success(db: Session): assert updated_count == initial_count + 3 -def test_get_conversations_count_by_project_excludes_deleted(db: Session): - """Test that deleted conversations are not counted.""" - project = get_project(db) - organization = get_organization(db) - - # Create a conversation - conversation_data = OpenAIConversationCreate( - response_id=generate_openai_id("resp_", 40), - ancestor_response_id=generate_openai_id("resp_", 40), - previous_response_id=None, - user_question="Test question", - response="Test response", - model="gpt-4o", - assistant_id=generate_openai_id("asst_", 20), - ) - - conversation = create_conversation( - session=db, - conversation=conversation_data, - project_id=project.id, - organization_id=organization.id, - ) - - # Test the current conversation - current_response_id = generate_openai_id("resp_", 40) - ancestor_id = get_ancestor_id_from_response( - session=db, - current_response_id=current_response_id, - previous_response_id=conversation.response_id, - project_id=project.id, - ) - - assert ancestor_id == conversation.ancestor_response_id - - def test_get_ancestor_id_from_response_previous_found_without_ancestor(db: Session): """Test get_ancestor_id_from_response when previous_response_id is found but has no ancestor.""" project = get_project(db) organization = get_organization(db) - # Create a previous conversation without ancestor + # Create a previous conversation that is self-referencing previous_response_id = generate_openai_id("resp_", 40) previous_conversation_data = OpenAIConversationCreate( response_id=previous_response_id, @@ -505,7 +470,7 @@ def test_get_ancestor_id_from_response_previous_found_without_ancestor(db: Sessi assistant_id=generate_openai_id("asst_", 20), ) - previous_conversation = create_conversation( + create_conversation( session=db, conversation=previous_conversation_data, project_id=project.id, From bb5308a46e713e2a6cafe3c8eb483f2203d71da6 Mon Sep 17 00:00:00 2001 
From: Akhilesh Negi Date: Tue, 29 Jul 2025 14:39:08 +0530 Subject: [PATCH 20/29] reducing db calls --- backend/app/api/routes/responses.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index 6acb6394..d756e6b2 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -196,19 +196,13 @@ def process_response( }, ) - # Set ancestor_response_id using CRUD function - ancestor_response_id = get_ancestor_id_from_response( - session=session, - current_response_id=response.id, - previous_response_id=response.previous_response_id, - project_id=project_id, - ) - # Create conversation record in database conversation_data = OpenAIConversationCreate( response_id=response.id, previous_response_id=response.previous_response_id, - ancestor_response_id=ancestor_response_id, + ancestor_response_id=latest_conversation.response_id + if latest_conversation + else None, user_question=request.question, response=response.output_text, model=response.model, From 1aaf4f1af6d86707a7ea144cb22761060566a974 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 14:52:35 +0530 Subject: [PATCH 21/29] remove unnecessary code --- backend/app/api/routes/responses.py | 49 ++--------------------------- 1 file changed, 2 insertions(+), 47 deletions(-) diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index d756e6b2..0ad4f647 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -202,7 +202,7 @@ def process_response( previous_response_id=response.previous_response_id, ancestor_response_id=latest_conversation.response_id if latest_conversation - else None, + else response.id, user_question=request.question, response=response.output_text, model=response.model, @@ -380,20 +380,9 @@ async def responses_sync( ) try: - # Get the latest conversation by ancestor ID to use as 
previous_response_id - previous_response_id = request.response_id - if request.response_id: - latest_conversation = get_conversation_by_ancestor_id( - session=_session, - ancestor_response_id=request.response_id, - project_id=project_id, - ) - if latest_conversation: - previous_response_id = latest_conversation.response_id - response = client.responses.create( model=request.model, - previous_response_id=previous_response_id, + previous_response_id=request.response_id, instructions=request.instructions, tools=[ { @@ -432,40 +421,6 @@ async def responses_sync( }, ) - # Set ancestor_response_id using CRUD function for sync endpoint - ancestor_response_id = get_ancestor_id_from_response( - session=_session, - current_response_id=response.id, - previous_response_id=response.previous_response_id, - project_id=project_id, - ) - - # Create conversation record in database - conversation_data = OpenAIConversationCreate( - response_id=response.id, - previous_response_id=request.response_id, - ancestor_response_id=ancestor_response_id, - user_question=request.question, - response=response.output_text, - model=response.model, - assistant_id="sync_request", # For sync requests, we don't have assistant_id - ) - - try: - create_conversation( - session=_session, - conversation=conversation_data, - project_id=project_id, - organization_id=organization_id, - ) - logger.info( - f"Created conversation record for sync response_id={response.id}, project_id={project_id}" - ) - except Exception as e: - logger.error( - f"Failed to create conversation record for sync response_id={response.id}, project_id={project_id}: {str(e)}" - ) - tracer.flush() return ResponsesAPIResponse.success_response( From db0d2f7226fb5ffe00469046f0b010f87ef6f03e Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 15:36:23 +0530 Subject: [PATCH 22/29] cleanups --- backend/app/api/routes/responses.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git 
a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index 0ad4f647..a88990ae 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -138,10 +138,11 @@ def process_response( try: # Get the latest conversation by ancestor ID to use as previous_response_id previous_response_id = request.response_id - if request.response_id: + latest_conversation = None + if previous_response_id: latest_conversation = get_conversation_by_ancestor_id( session=session, - ancestor_response_id=request.response_id, + ancestor_response_id=previous_response_id, project_id=project_id, ) if latest_conversation: @@ -195,14 +196,23 @@ def process_response( "error": None, }, ) + # Set ancestor_response_id using CRUD function + ancestor_response_id = ( + latest_conversation.ancestor_response_id + if latest_conversation + else get_ancestor_id_from_response( + session=session, + current_response_id=response.id, + previous_response_id=response.previous_response_id, + project_id=project_id, + ) + ) # Create conversation record in database conversation_data = OpenAIConversationCreate( response_id=response.id, previous_response_id=response.previous_response_id, - ancestor_response_id=latest_conversation.response_id - if latest_conversation - else response.id, + ancestor_response_id=ancestor_response_id, user_question=request.question, response=response.output_text, model=response.model, From e5234a74ba9f7ae4063e449f98219b803d7bdb73 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 16:54:57 +0530 Subject: [PATCH 23/29] fixing CI --- backend/app/tests/api/routes/test_responses.py | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/app/tests/api/routes/test_responses.py b/backend/app/tests/api/routes/test_responses.py index 9316d077..d2b16781 100644 --- a/backend/app/tests/api/routes/test_responses.py +++ b/backend/app/tests/api/routes/test_responses.py @@ -562,6 +562,7 @@ def 
test_responses_endpoint_with_ancestor_conversation_found( # Setup mock conversation found by ancestor ID mock_conversation = MagicMock() mock_conversation.response_id = "resp_latest1234567890abcdef1234567890" + mock_conversation.ancestor_response_id = "resp_ancestor1234567890abcdef1234567890" mock_get_conversation_by_ancestor_id.return_value = mock_conversation # Get the Dalgo project ID From dcb40b763d11892b11ef09df551903ed82e6883d Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 17:32:06 +0530 Subject: [PATCH 24/29] reverting testcases for response/sync --- .../app/tests/api/routes/test_responses.py | 77 ++----------------- 1 file changed, 8 insertions(+), 69 deletions(-) diff --git a/backend/app/tests/api/routes/test_responses.py b/backend/app/tests/api/routes/test_responses.py index d2b16781..7fc69a20 100644 --- a/backend/app/tests/api/routes/test_responses.py +++ b/backend/app/tests/api/routes/test_responses.py @@ -786,20 +786,14 @@ def test_responses_endpoint_without_response_id( @patch("app.api.routes.responses.OpenAI") @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.get_ancestor_id_from_response") -@patch("app.api.routes.responses.create_conversation") -@patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_sync_endpoint_with_ancestor_conversation_found( - mock_get_conversation_by_ancestor_id, - mock_create_conversation, - mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_credential, mock_openai, db, user_api_key_header: dict[str, str], ): - """Test the /responses/sync endpoint when a conversation is found by ancestor ID.""" + """Test the /responses/sync endpoint when a response_id is provided.""" # Setup mock credentials mock_get_credential.return_value = {"api_key": "test_api_key"} @@ -823,17 +817,6 @@ def test_responses_sync_endpoint_with_ancestor_conversation_found( mock_tracer = MagicMock() 
mock_tracer_class.return_value = mock_tracer - # Setup mock CRUD functions - mock_get_ancestor_id_from_response.return_value = ( - "resp_ancestor1234567890abcdef1234567890" - ) - mock_create_conversation.return_value = None - - # Setup mock conversation found by ancestor ID - mock_conversation = MagicMock() - mock_conversation.response_id = "resp_latest1234567890abcdef1234567890" - mock_get_conversation_by_ancestor_id.return_value = mock_conversation - # Get the Dalgo project ID dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() if not dalgo_project: @@ -862,38 +845,26 @@ def test_responses_sync_endpoint_with_ancestor_conversation_found( == "resp_1234567890abcdef1234567890abcdef1234567890" ) - # Verify get_conversation_by_ancestor_id was called with correct parameters - mock_get_conversation_by_ancestor_id.assert_called_once() - call_args = mock_get_conversation_by_ancestor_id.call_args - assert ( - call_args[1]["ancestor_response_id"] - == "resp_ancestor1234567890abcdef1234567890" - ) - assert call_args[1]["project_id"] == dalgo_project.id - - # Verify OpenAI client was called with the conversation's response_id as previous_response_id + # Verify OpenAI client was called with the original response_id as previous_response_id + # (sync endpoint doesn't do conversation lookup like async endpoint) mock_client.responses.create.assert_called_once() call_args = mock_client.responses.create.call_args[1] - assert call_args["previous_response_id"] == "resp_latest1234567890abcdef1234567890" + assert ( + call_args["previous_response_id"] == "resp_ancestor1234567890abcdef1234567890" + ) @patch("app.api.routes.responses.OpenAI") @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.get_ancestor_id_from_response") -@patch("app.api.routes.responses.create_conversation") -@patch("app.api.routes.responses.get_conversation_by_ancestor_id") def 
test_responses_sync_endpoint_with_ancestor_conversation_not_found( - mock_get_conversation_by_ancestor_id, - mock_create_conversation, - mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_credential, mock_openai, db, user_api_key_header: dict[str, str], ): - """Test the /responses/sync endpoint when no conversation is found by ancestor ID.""" + """Test the /responses/sync endpoint when a response_id is provided.""" # Setup mock credentials mock_get_credential.return_value = {"api_key": "test_api_key"} @@ -917,15 +888,6 @@ def test_responses_sync_endpoint_with_ancestor_conversation_not_found( mock_tracer = MagicMock() mock_tracer_class.return_value = mock_tracer - # Setup mock CRUD functions - mock_get_ancestor_id_from_response.return_value = ( - "resp_ancestor1234567890abcdef1234567890" - ) - mock_create_conversation.return_value = None - - # Setup mock conversation not found by ancestor ID - mock_get_conversation_by_ancestor_id.return_value = None - # Get the Dalgo project ID dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() if not dalgo_project: @@ -954,16 +916,8 @@ def test_responses_sync_endpoint_with_ancestor_conversation_not_found( == "resp_1234567890abcdef1234567890abcdef1234567890" ) - # Verify get_conversation_by_ancestor_id was called with correct parameters - mock_get_conversation_by_ancestor_id.assert_called_once() - call_args = mock_get_conversation_by_ancestor_id.call_args - assert ( - call_args[1]["ancestor_response_id"] - == "resp_ancestor1234567890abcdef1234567890" - ) - assert call_args[1]["project_id"] == dalgo_project.id - # Verify OpenAI client was called with the original response_id as previous_response_id + # (sync endpoint doesn't do conversation lookup like async endpoint) mock_client.responses.create.assert_called_once() call_args = mock_client.responses.create.call_args[1] assert ( @@ -974,13 +928,7 @@ def test_responses_sync_endpoint_with_ancestor_conversation_not_found( 
@patch("app.api.routes.responses.OpenAI") @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.get_ancestor_id_from_response") -@patch("app.api.routes.responses.create_conversation") -@patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_sync_endpoint_without_response_id( - mock_get_conversation_by_ancestor_id, - mock_create_conversation, - mock_get_ancestor_id_from_response, mock_tracer_class, mock_get_credential, mock_openai, @@ -1011,12 +959,6 @@ def test_responses_sync_endpoint_without_response_id( mock_tracer = MagicMock() mock_tracer_class.return_value = mock_tracer - # Setup mock CRUD functions - mock_get_ancestor_id_from_response.return_value = ( - "resp_1234567890abcdef1234567890abcdef1234567890" - ) - mock_create_conversation.return_value = None - # Get the Dalgo project ID dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() if not dalgo_project: @@ -1045,9 +987,6 @@ def test_responses_sync_endpoint_without_response_id( == "resp_1234567890abcdef1234567890abcdef1234567890" ) - # Verify get_conversation_by_ancestor_id was not called since response_id is None - mock_get_conversation_by_ancestor_id.assert_not_called() - # Verify OpenAI client was called with None as previous_response_id mock_client.responses.create.assert_called_once() call_args = mock_client.responses.create.call_args[1] From adc41c392179c54574bbba8d0cf3af0282e73b50 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 17:36:03 +0530 Subject: [PATCH 25/29] coderabbit cleanups --- backend/app/tests/api/routes/test_responses.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/backend/app/tests/api/routes/test_responses.py b/backend/app/tests/api/routes/test_responses.py index 7fc69a20..b68b542b 100644 --- a/backend/app/tests/api/routes/test_responses.py +++ b/backend/app/tests/api/routes/test_responses.py @@ 
-1,4 +1,4 @@ -from unittest.mock import MagicMock, patch, AsyncMock +from unittest.mock import MagicMock, patch import pytest from fastapi import FastAPI from fastapi.testclient import TestClient @@ -6,8 +6,7 @@ import openai from app.api.routes.responses import router -from app.models import Project, Assistant -from app.core.exception_handlers import HTTPException +from app.models import Project # Wrap the router in a FastAPI app instance app = FastAPI() @@ -23,7 +22,6 @@ @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_success( - mock_get_conversation_by_ancestor_id, mock_create_conversation, mock_get_ancestor_id_from_response, mock_tracer_class, From f8e38e584fa3780f431633a3805aaac512908c72 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 18:11:49 +0530 Subject: [PATCH 26/29] cleanup testcases --- .../app/tests/api/routes/test_responses.py | 25 ++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/backend/app/tests/api/routes/test_responses.py b/backend/app/tests/api/routes/test_responses.py index b68b542b..37f03156 100644 --- a/backend/app/tests/api/routes/test_responses.py +++ b/backend/app/tests/api/routes/test_responses.py @@ -22,6 +22,7 @@ @patch("app.api.routes.responses.create_conversation") @patch("app.api.routes.responses.get_conversation_by_ancestor_id") def test_responses_endpoint_success( + mock_get_conversation_by_ancestor_id, mock_create_conversation, mock_get_ancestor_id_from_response, mock_tracer_class, @@ -32,8 +33,21 @@ def test_responses_endpoint_success( user_api_key_header: dict[str, str], ): """Test the /responses endpoint for successful response creation.""" - # Setup mock credentials - mock_get_credential.return_value = {"api_key": "test_api_key"} + + # Setup mock credentials - configure to return different values based on provider + def mock_credential_side_effect(*args, **kwargs): + provider 
= kwargs.get("provider") + if provider == "openai": + return {"api_key": "test_api_key"} + elif provider == "langfuse": + return { + "public_key": "test_public_key", + "secret_key": "test_secret_key", + "host": "https://cloud.langfuse.com", + } + return None + + mock_get_credential.side_effect = mock_credential_side_effect # Setup mock assistant mock_assistant = MagicMock() @@ -42,7 +56,12 @@ def test_responses_endpoint_success( mock_assistant.temperature = 0.1 mock_assistant.vector_store_ids = ["vs_test"] mock_assistant.max_num_results = 20 - mock_get_assistant.return_value = mock_assistant + + # Configure mock to return the assistant for any call + def mock_assistant_side_effect(*args, **kwargs): + return mock_assistant + + mock_get_assistant.side_effect = mock_assistant_side_effect # Setup mock OpenAI client mock_client = MagicMock() From 0df228a667690a793f1d753805359246c0082102 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 18:20:41 +0530 Subject: [PATCH 27/29] renaming --- backend/app/tests/api/routes/test_responses.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/backend/app/tests/api/routes/test_responses.py b/backend/app/tests/api/routes/test_responses.py index 37f03156..6517d694 100644 --- a/backend/app/tests/api/routes/test_responses.py +++ b/backend/app/tests/api/routes/test_responses.py @@ -35,7 +35,7 @@ def test_responses_endpoint_success( """Test the /responses endpoint for successful response creation.""" # Setup mock credentials - configure to return different values based on provider - def mock_credential_side_effect(*args, **kwargs): + def mock_get_credentials_by_provider(*args, **kwargs): provider = kwargs.get("provider") if provider == "openai": return {"api_key": "test_api_key"} @@ -47,7 +47,7 @@ def mock_credential_side_effect(*args, **kwargs): } return None - mock_get_credential.side_effect = mock_credential_side_effect + mock_get_credential.side_effect = mock_get_credentials_by_provider # 
Setup mock assistant mock_assistant = MagicMock() @@ -58,10 +58,10 @@ def mock_credential_side_effect(*args, **kwargs): mock_assistant.max_num_results = 20 # Configure mock to return the assistant for any call - def mock_assistant_side_effect(*args, **kwargs): + def return_mock_assistant(*args, **kwargs): return mock_assistant - mock_get_assistant.side_effect = mock_assistant_side_effect + mock_get_assistant.side_effect = return_mock_assistant # Setup mock OpenAI client mock_client = MagicMock() From aeb224cdb7b6f843ff90f1d3d97b9bec31701624 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Tue, 29 Jul 2025 18:25:04 +0530 Subject: [PATCH 28/29] removed /sync testcases --- .../app/tests/api/routes/test_responses.py | 348 ------------------ 1 file changed, 348 deletions(-) diff --git a/backend/app/tests/api/routes/test_responses.py b/backend/app/tests/api/routes/test_responses.py index 6517d694..483119d5 100644 --- a/backend/app/tests/api/routes/test_responses.py +++ b/backend/app/tests/api/routes/test_responses.py @@ -282,144 +282,6 @@ def test_responses_endpoint_missing_api_key_in_credentials( assert "OpenAI API key not configured" in response_json["error"] -@patch("app.api.routes.responses.OpenAI") -@patch("app.api.routes.responses.get_provider_credential") -@patch("app.api.routes.responses.LangfuseTracer") -@patch("app.api.routes.responses.get_ancestor_id_from_response") -@patch("app.api.routes.responses.create_conversation") -@patch("app.api.routes.responses.get_conversation_by_ancestor_id") -def test_responses_sync_endpoint_success( - mock_get_conversation_by_ancestor_id, - mock_create_conversation, - mock_get_ancestor_id_from_response, - mock_tracer_class, - mock_get_credential, - mock_openai, - db, - user_api_key_header, -): - """Test the /responses/sync endpoint for successful response creation.""" - # Setup mock credentials - mock_get_credential.return_value = {"api_key": "test_api_key"} - - # Setup mock OpenAI client - mock_client = MagicMock() - 
mock_openai.return_value = mock_client - - # Setup the mock response object with proper response ID format - mock_response = MagicMock() - mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" - mock_response.output_text = "Test output" - mock_response.model = "gpt-4o" - mock_response.usage.input_tokens = 10 - mock_response.usage.output_tokens = 5 - mock_response.usage.total_tokens = 15 - mock_response.output = [] - mock_response.previous_response_id = None - mock_client.responses.create.return_value = mock_response - - # Setup mock tracer - mock_tracer = MagicMock() - mock_tracer_class.return_value = mock_tracer - - # Setup mock CRUD functions - mock_get_ancestor_id_from_response.return_value = ( - "resp_ancestor1234567890abcdef1234567890" - ) - mock_create_conversation.return_value = None - - request_data = { - "model": "gpt-4o", - "instructions": "Test instructions", - "vector_store_ids": ["vs_test"], - "max_num_results": 20, - "temperature": 0.1, - "question": "What is this?", - } - - response = client.post( - "/responses/sync", json=request_data, headers=user_api_key_header - ) - assert response.status_code == 200 - response_json = response.json() - assert response_json["success"] is True - assert response_json["data"]["status"] == "success" - assert ( - response_json["data"]["response_id"] - == "resp_1234567890abcdef1234567890abcdef1234567890" - ) - assert response_json["data"]["message"] == "Test output" - - -@patch("app.api.routes.responses.get_provider_credential") -def test_responses_sync_endpoint_no_openai_credentials( - mock_get_credential, - db, - user_api_key_header, -): - """Test the /responses/sync endpoint when OpenAI credentials are not configured.""" - # Setup mock credentials to return None (no credentials) - mock_get_credential.return_value = None - - request_data = { - "model": "gpt-4o", - "instructions": "Test instructions", - "vector_store_ids": ["vs_test"], - "max_num_results": 20, - "temperature": 0.1, - "question": "What is 
this?", - } - - response = client.post( - "/responses/sync", json=request_data, headers=user_api_key_header - ) - assert response.status_code == 200 - response_json = response.json() - assert response_json["success"] is False - assert "OpenAI API key not configured" in response_json["error"] - - -@patch("app.api.routes.responses.OpenAI") -@patch("app.api.routes.responses.get_provider_credential") -@patch("app.api.routes.responses.LangfuseTracer") -def test_responses_sync_endpoint_openai_error( - mock_tracer_class, - mock_get_credential, - mock_openai, - db, - user_api_key_header, -): - """Test the /responses/sync endpoint when OpenAI API returns an error.""" - # Setup mock credentials - mock_get_credential.return_value = {"api_key": "test_api_key"} - - # Setup mock OpenAI client that raises an error - mock_client = MagicMock() - mock_openai.return_value = mock_client - mock_client.responses.create.side_effect = openai.OpenAIError("OpenAI API error") - - # Setup mock tracer - mock_tracer = MagicMock() - mock_tracer_class.return_value = mock_tracer - - request_data = { - "model": "gpt-4o", - "instructions": "Test instructions", - "vector_store_ids": ["vs_test"], - "max_num_results": 20, - "temperature": 0.1, - "question": "What is this?", - } - - response = client.post( - "/responses/sync", json=request_data, headers=user_api_key_header - ) - assert response.status_code == 200 - response_json = response.json() - assert response_json["success"] is False - assert "OpenAI API error" in response_json["error"] - - @patch("app.api.routes.responses.OpenAI") @patch("app.api.routes.responses.get_provider_credential") @patch("app.api.routes.responses.get_assistant_by_id") @@ -798,213 +660,3 @@ def test_responses_endpoint_without_response_id( mock_client.responses.create.assert_called_once() call_args = mock_client.responses.create.call_args[1] assert call_args["previous_response_id"] is None - - -@patch("app.api.routes.responses.OpenAI") 
-@patch("app.api.routes.responses.get_provider_credential") -@patch("app.api.routes.responses.LangfuseTracer") -def test_responses_sync_endpoint_with_ancestor_conversation_found( - mock_tracer_class, - mock_get_credential, - mock_openai, - db, - user_api_key_header: dict[str, str], -): - """Test the /responses/sync endpoint when a response_id is provided.""" - # Setup mock credentials - mock_get_credential.return_value = {"api_key": "test_api_key"} - - # Setup mock OpenAI client - mock_client = MagicMock() - mock_openai.return_value = mock_client - - # Setup the mock response object - mock_response = MagicMock() - mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" - mock_response.output_text = "Test output" - mock_response.model = "gpt-4o" - mock_response.usage.input_tokens = 10 - mock_response.usage.output_tokens = 5 - mock_response.usage.total_tokens = 15 - mock_response.output = [] - mock_response.previous_response_id = "resp_ancestor1234567890abcdef1234567890" - mock_client.responses.create.return_value = mock_response - - # Setup mock tracer - mock_tracer = MagicMock() - mock_tracer_class.return_value = mock_tracer - - # Get the Dalgo project ID - dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() - if not dalgo_project: - pytest.skip("Dalgo project not found in the database") - - request_data = { - "model": "gpt-4o", - "instructions": "Test instructions", - "vector_store_ids": ["vs_test"], - "max_num_results": 20, - "temperature": 0.1, - "question": "What is Dalgo?", - "response_id": "resp_ancestor1234567890abcdef1234567890", - } - - response = client.post( - "/responses/sync", json=request_data, headers=user_api_key_header - ) - - assert response.status_code == 200 - response_json = response.json() - assert response_json["success"] is True - assert response_json["data"]["status"] == "success" - assert ( - response_json["data"]["response_id"] - == "resp_1234567890abcdef1234567890abcdef1234567890" - ) - - # Verify 
OpenAI client was called with the original response_id as previous_response_id - # (sync endpoint doesn't do conversation lookup like async endpoint) - mock_client.responses.create.assert_called_once() - call_args = mock_client.responses.create.call_args[1] - assert ( - call_args["previous_response_id"] == "resp_ancestor1234567890abcdef1234567890" - ) - - -@patch("app.api.routes.responses.OpenAI") -@patch("app.api.routes.responses.get_provider_credential") -@patch("app.api.routes.responses.LangfuseTracer") -def test_responses_sync_endpoint_with_ancestor_conversation_not_found( - mock_tracer_class, - mock_get_credential, - mock_openai, - db, - user_api_key_header: dict[str, str], -): - """Test the /responses/sync endpoint when a response_id is provided.""" - # Setup mock credentials - mock_get_credential.return_value = {"api_key": "test_api_key"} - - # Setup mock OpenAI client - mock_client = MagicMock() - mock_openai.return_value = mock_client - - # Setup the mock response object - mock_response = MagicMock() - mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" - mock_response.output_text = "Test output" - mock_response.model = "gpt-4o" - mock_response.usage.input_tokens = 10 - mock_response.usage.output_tokens = 5 - mock_response.usage.total_tokens = 15 - mock_response.output = [] - mock_response.previous_response_id = "resp_ancestor1234567890abcdef1234567890" - mock_client.responses.create.return_value = mock_response - - # Setup mock tracer - mock_tracer = MagicMock() - mock_tracer_class.return_value = mock_tracer - - # Get the Dalgo project ID - dalgo_project = db.exec(select(Project).where(Project.name == "Dalgo")).first() - if not dalgo_project: - pytest.skip("Dalgo project not found in the database") - - request_data = { - "model": "gpt-4o", - "instructions": "Test instructions", - "vector_store_ids": ["vs_test"], - "max_num_results": 20, - "temperature": 0.1, - "question": "What is Dalgo?", - "response_id": 
"resp_ancestor1234567890abcdef1234567890", - } - - response = client.post( - "/responses/sync", json=request_data, headers=user_api_key_header - ) - - assert response.status_code == 200 - response_json = response.json() - assert response_json["success"] is True - assert response_json["data"]["status"] == "success" - assert ( - response_json["data"]["response_id"] - == "resp_1234567890abcdef1234567890abcdef1234567890" - ) - - # Verify OpenAI client was called with the original response_id as previous_response_id - # (sync endpoint doesn't do conversation lookup like async endpoint) - mock_client.responses.create.assert_called_once() - call_args = mock_client.responses.create.call_args[1] - assert ( - call_args["previous_response_id"] == "resp_ancestor1234567890abcdef1234567890" - ) - - -@patch("app.api.routes.responses.OpenAI") -@patch("app.api.routes.responses.get_provider_credential") -@patch("app.api.routes.responses.LangfuseTracer") -def test_responses_sync_endpoint_without_response_id( - mock_tracer_class, - mock_get_credential, - mock_openai, - db, - user_api_key_header: dict[str, str], -): - """Test the /responses/sync endpoint when no response_id is provided.""" - # Setup mock credentials - mock_get_credential.return_value = {"api_key": "test_api_key"} - - # Setup mock OpenAI client - mock_client = MagicMock() - mock_openai.return_value = mock_client - - # Setup the mock response object - mock_response = MagicMock() - mock_response.id = "resp_1234567890abcdef1234567890abcdef1234567890" - mock_response.output_text = "Test output" - mock_response.model = "gpt-4o" - mock_response.usage.input_tokens = 10 - mock_response.usage.output_tokens = 5 - mock_response.usage.total_tokens = 15 - mock_response.output = [] - mock_response.previous_response_id = None - mock_client.responses.create.return_value = mock_response - - # Setup mock tracer - mock_tracer = MagicMock() - mock_tracer_class.return_value = mock_tracer - - # Get the Dalgo project ID - dalgo_project = 
db.exec(select(Project).where(Project.name == "Dalgo")).first() - if not dalgo_project: - pytest.skip("Dalgo project not found in the database") - - request_data = { - "model": "gpt-4o", - "instructions": "Test instructions", - "vector_store_ids": ["vs_test"], - "max_num_results": 20, - "temperature": 0.1, - "question": "What is Dalgo?", - # No response_id provided - } - - response = client.post( - "/responses/sync", json=request_data, headers=user_api_key_header - ) - - assert response.status_code == 200 - response_json = response.json() - assert response_json["success"] is True - assert response_json["data"]["status"] == "success" - assert ( - response_json["data"]["response_id"] - == "resp_1234567890abcdef1234567890abcdef1234567890" - ) - - # Verify OpenAI client was called with None as previous_response_id - mock_client.responses.create.assert_called_once() - call_args = mock_client.responses.create.call_args[1] - assert call_args["previous_response_id"] is None From 3ef0162582d6fce57ff0e76d01afecb78325f06b Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Wed, 30 Jul 2025 09:35:37 +0530 Subject: [PATCH 29/29] cleanups --- backend/app/api/routes/responses.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/backend/app/api/routes/responses.py b/backend/app/api/routes/responses.py index a88990ae..01361803 100644 --- a/backend/app/api/routes/responses.py +++ b/backend/app/api/routes/responses.py @@ -137,20 +137,20 @@ def process_response( try: # Get the latest conversation by ancestor ID to use as previous_response_id - previous_response_id = request.response_id + ancestor_id = request.response_id latest_conversation = None - if previous_response_id: + if ancestor_id: latest_conversation = get_conversation_by_ancestor_id( session=session, - ancestor_response_id=previous_response_id, + ancestor_response_id=ancestor_id, project_id=project_id, ) if latest_conversation: - previous_response_id = latest_conversation.response_id + ancestor_id 
= latest_conversation.response_id params = { "model": assistant.model, - "previous_response_id": previous_response_id, + "previous_response_id": ancestor_id, "instructions": assistant.instructions, "temperature": assistant.temperature, "input": [{"role": "user", "content": request.question}],