Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
77619b5
added migration
Jul 25, 2025
aeebc1f
first stab at CRUD
Jul 26, 2025
a2b001c
added ancestor id fetching
Jul 26, 2025
42d1d2c
removing create endpoint
Jul 27, 2025
7df16e8
initiating populating data
Jul 27, 2025
9432e0d
added lookup logic
Jul 27, 2025
c02b35d
updated business logic and testcases
Jul 27, 2025
10ccbb3
revert few updates
Jul 28, 2025
bafa85c
updated testcases
Jul 28, 2025
5e8ab64
Merge branch 'main' into enhancement/openai-conversation-lookup
AkhileshNegi Jul 28, 2025
29e7655
fixing testcases
Jul 29, 2025
08eadcd
Merge branch 'main' into enhancement/openai-conversation-lookup
AkhileshNegi Jul 29, 2025
6a5333d
added logic for ancestor lookup
Jul 29, 2025
c40a8e9
Merge branch 'enhancement/openai-conversation-lookup' of github.com:P…
Jul 29, 2025
468a812
added logic for ancestor lookup
Jul 29, 2025
1fd09b5
removed xml file
Jul 29, 2025
751dbe5
cleanups
Jul 29, 2025
e69cc32
coderabbit cleanups
Jul 29, 2025
a796c3c
consistency updating db entry
Jul 29, 2025
6b7b559
renaming function
Jul 29, 2025
afb0ce7
cleanups based on review
Jul 29, 2025
947546e
cleanup testcases
Jul 29, 2025
bb5308a
reducing db calls
Jul 29, 2025
1aaf4f1
remove unnecessary code
Jul 29, 2025
db0d2f7
cleanups
Jul 29, 2025
e5234a7
fixing CI
Jul 29, 2025
dcb40b7
reverting testcases for response/sync
Jul 29, 2025
adc41c3
coderabbit cleanups
Jul 29, 2025
f8e38e5
cleanup testcases
Jul 29, 2025
0df228a
renaming
Jul 29, 2025
aeb224c
removed /sync testcases
Jul 29, 2025
3ef0162
cleanups
Jul 30, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion backend/app/api/routes/openai_conversation.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
get_conversation_by_ancestor_id,
get_conversations_by_project,
get_conversations_count_by_project,
create_conversation,
delete_conversation,
)
from app.models import (
Expand Down
68 changes: 65 additions & 3 deletions backend/app/api/routes/responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,12 @@
from app.api.routes.threads import send_callback
from app.crud.assistants import get_assistant_by_id
from app.crud.credentials import get_provider_credential
from app.models import UserProjectOrg
from app.crud.openai_conversation import (
create_conversation,
get_ancestor_id_from_response,
get_conversation_by_ancestor_id,
)
from app.models import UserProjectOrg, OpenAIConversationCreate
from app.utils import APIResponse, mask_string
from app.core.langfuse.langfuse import LangfuseTracer

Expand All @@ -21,8 +26,20 @@

def handle_openai_error(e: openai.OpenAIError) -> str:
"""Extract error message from OpenAI error."""
if isinstance(e.body, dict) and "message" in e.body:
# Try to get error message from different possible attributes
if hasattr(e, "body") and isinstance(e.body, dict) and "message" in e.body:
return e.body["message"]
elif hasattr(e, "message"):
return e.message
elif hasattr(e, "response") and hasattr(e.response, "json"):
try:
error_data = e.response.json()
if isinstance(error_data, dict) and "error" in error_data:
error_info = error_data["error"]
if isinstance(error_info, dict) and "message" in error_info:
return error_info["message"]
except:
pass
return str(e)
Comment on lines 27 to 43
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Improve exception handling specificity

The enhanced error message extraction logic is good, but the bare except clause should be more specific to avoid catching system exceptions.

                 if isinstance(error_info, dict) and "message" in error_info:
                     return error_info["message"]
-        except:
+        except (AttributeError, TypeError, ValueError):
             pass
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
def handle_openai_error(e: openai.OpenAIError) -> str:
"""Extract error message from OpenAI error."""
if isinstance(e.body, dict) and "message" in e.body:
# Try to get error message from different possible attributes
if hasattr(e, "body") and isinstance(e.body, dict) and "message" in e.body:
return e.body["message"]
elif hasattr(e, "message"):
return e.message
elif hasattr(e, "response") and hasattr(e.response, "json"):
try:
error_data = e.response.json()
if isinstance(error_data, dict) and "error" in error_data:
error_info = error_data["error"]
if isinstance(error_info, dict) and "message" in error_info:
return error_info["message"]
except:
pass
return str(e)
def handle_openai_error(e: openai.OpenAIError) -> str:
"""Extract error message from OpenAI error."""
# Try to get error message from different possible attributes
if hasattr(e, "body") and isinstance(e.body, dict) and "message" in e.body:
return e.body["message"]
elif hasattr(e, "message"):
return e.message
elif hasattr(e, "response") and hasattr(e.response, "json"):
try:
error_data = e.response.json()
if isinstance(error_data, dict) and "error" in error_data:
error_info = error_data["error"]
if isinstance(error_info, dict) and "message" in error_info:
return error_info["message"]
except (AttributeError, TypeError, ValueError):
pass
return str(e)
🧰 Tools
🪛 Ruff (0.12.2)

40-40: Do not use bare except

(E722)

🤖 Prompt for AI Agents
In backend/app/api/routes/responses.py between lines 26 and 42, the bare except
clause in the error handling should be replaced with a more specific exception,
such as catching JSONDecodeError or a relevant exception type from the
response.json() call, to avoid unintentionally catching system-level exceptions.
Update the except clause to catch only expected exceptions related to JSON
parsing or attribute errors.



Expand Down Expand Up @@ -98,6 +115,8 @@ def process_response(
assistant,
tracer: LangfuseTracer,
project_id: int,
organization_id: int,
session: Session,
):
"""Process a response and send callback with results, with Langfuse tracing."""
logger.info(
Expand All @@ -117,9 +136,21 @@ def process_response(
)

try:
# Get the latest conversation by ancestor ID to use as previous_response_id
ancestor_id = request.response_id
latest_conversation = None
if ancestor_id:
latest_conversation = get_conversation_by_ancestor_id(
session=session,
ancestor_response_id=ancestor_id,
project_id=project_id,
)
if latest_conversation:
ancestor_id = latest_conversation.response_id
Comment on lines +148 to +149
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Potential logic error in ancestor_id reassignment.

Line 149 reassigns ancestor_id = latest_conversation.response_id, but this contradicts the variable name and purpose. The ancestor_id should remain as the original request.response_id to maintain the root of the conversation chain.

Apply this fix:

-            if latest_conversation:
-                ancestor_id = latest_conversation.response_id
+            # ancestor_id should remain as request.response_id (the root)
+            # latest_conversation.response_id will be used as previous_response_id instead

Then update line 153 to use the correct previous response ID:

-            "previous_response_id": ancestor_id,
+            "previous_response_id": latest_conversation.response_id if latest_conversation else ancestor_id,
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
if latest_conversation:
ancestor_id = latest_conversation.response_id
# keep ancestor_id as the root (request.response_id) only
ancestor_id = request.response_id
latest_conversation = (
session.query(OpenAIConversation)
.filter_by(ancestor_response_id=ancestor_id)
.order_by(OpenAIConversation.created_at.desc())
.first()
)
# ancestor_id should remain as request.response_id (the root)
# latest_conversation.response_id will be used as previous_response_id instead
conversation_data = OpenAIConversationCreate(
response_id=response.id,
- previous_response_id=ancestor_id,
+ previous_response_id=latest_conversation.response_id
+ if latest_conversation
+ else ancestor_id,
ancestor_response_id=request.response_id,
user_question=request.question,
response=response.output_text,
model=response.model,
assistant_id=request.assistant_id,
)
🤖 Prompt for AI Agents
In backend/app/api/routes/responses.py around lines 148 to 149, the assignment
of ancestor_id to latest_conversation.response_id is incorrect because
ancestor_id should keep the original request.response_id to represent the root
of the conversation chain. Fix this by not reassigning ancestor_id here and
ensure that subsequent logic, such as on line 153, uses the correct previous
response ID instead of ancestor_id.


params = {
"model": assistant.model,
"previous_response_id": request.response_id,
"previous_response_id": ancestor_id,
"instructions": assistant.instructions,
"temperature": assistant.temperature,
"input": [{"role": "user", "content": request.question}],
Expand Down Expand Up @@ -165,6 +196,35 @@ def process_response(
"error": None,
},
)
# Set ancestor_response_id using CRUD function
ancestor_response_id = (
latest_conversation.ancestor_response_id
if latest_conversation
else get_ancestor_id_from_response(
session=session,
current_response_id=response.id,
previous_response_id=response.previous_response_id,
project_id=project_id,
)
)

# Create conversation record in database
conversation_data = OpenAIConversationCreate(
response_id=response.id,
previous_response_id=response.previous_response_id,
ancestor_response_id=ancestor_response_id,
user_question=request.question,
response=response.output_text,
model=response.model,
assistant_id=request.assistant_id,
)

create_conversation(
session=session,
conversation=conversation_data,
project_id=project_id,
organization_id=organization_id,
)

request_dict = request.model_dump()
callback_response = ResponsesAPIResponse.success_response(
Expand Down Expand Up @@ -264,6 +324,8 @@ async def responses(
assistant,
tracer,
project_id,
organization_id,
_session,
)

logger.info(
Expand Down
42 changes: 42 additions & 0 deletions backend/app/crud/openai_conversation.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import logging
from typing import Optional
from sqlmodel import Session, select, func

from app.models import OpenAIConversation, OpenAIConversationCreate
from app.core.util import now

Expand Down Expand Up @@ -57,6 +58,47 @@ def get_conversation_by_ancestor_id(
return result


def get_ancestor_id_from_response(
    session: Session,
    current_response_id: str,
    previous_response_id: str | None,
    project_id: int,
) -> str:
    """
    Determine the ancestor_response_id for a conversation entry.

    Resolution rules:
    1. No previous_response_id -> this response starts a new chain, so it
       is its own ancestor: return current_response_id.
    2. previous_response_id exists in the database (scoped to project_id)
       -> inherit that conversation's ancestor_response_id, keeping the
       whole chain rooted at the same response.
    3. previous_response_id is unknown to the database -> fall back to
       treating the previous response itself as the ancestor.

    Args:
        session: Database session
        current_response_id: The current response ID
        previous_response_id: The previous response ID (can be None)
        project_id: The project ID for scoping the search

    Returns:
        str: The determined ancestor_response_id
    """
    # Rule 1: a chain root is its own ancestor.
    if previous_response_id is None:
        return current_response_id

    # Rules 2 and 3: look up the previous response within this project.
    prior = get_conversation_by_response_id(
        session=session,
        response_id=previous_response_id,
        project_id=project_id,
    )
    return prior.ancestor_response_id if prior else previous_response_id


def get_conversations_count_by_project(
session: Session,
project_id: int,
Expand Down
4 changes: 2 additions & 2 deletions backend/app/models/openai_conversation.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from datetime import datetime
from typing import Optional
import re

from datetime import datetime
from typing import Optional
from pydantic import field_validator
from sqlmodel import Field, Relationship, SQLModel

Expand Down
3 changes: 1 addition & 2 deletions backend/app/tests/api/routes/test_openai_conversation.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from sqlmodel import Session
from fastapi.testclient import TestClient

from app.models import APIKeyPublic
from app.crud.openai_conversation import create_conversation
from app.models import APIKeyPublic
from app.models import OpenAIConversationCreate
from app.tests.utils.openai import generate_openai_id

Expand Down Expand Up @@ -431,7 +431,6 @@ def test_delete_conversation_success(
project_id=user_api_key.project_id,
organization_id=user_api_key.organization_id,
)

conversation_id = conversation.id

response = client.delete(
Expand Down
Loading