diff --git a/workers/fund_public_goods/lib/strategy/utils/generate_keywords.py b/workers/fund_public_goods/lib/strategy/utils/generate_keywords.py
index 2fd8645..1e4b552 100644
--- a/workers/fund_public_goods/lib/strategy/utils/generate_keywords.py
+++ b/workers/fund_public_goods/lib/strategy/utils/generate_keywords.py
@@ -24,7 +24,7 @@ def generate_keywords(project_descriptions: list[str]) -> list[list[str]]:
     keywords_prompt = ChatPromptTemplate.from_messages([
         ("system", keywords_prompt_template),
     ])
-    llm = ChatOpenAI(model="gpt-4-1106-preview") # type: ignore
+    llm = ChatOpenAI(model="gpt-4-turbo") # type: ignore
 
     keywords_chain = keywords_prompt | llm | StrOutputParser()
 
diff --git a/workers/fund_public_goods/lib/strategy/utils/generate_queries.py b/workers/fund_public_goods/lib/strategy/utils/generate_queries.py
index 135ebcf..c0a05f8 100644
--- a/workers/fund_public_goods/lib/strategy/utils/generate_queries.py
+++ b/workers/fund_public_goods/lib/strategy/utils/generate_queries.py
@@ -21,7 +21,7 @@ def generate_queries(prompt: str, n) -> list[str]:
     queries_prompt = ChatPromptTemplate.from_messages([
         ("system", queries_prompt_template),
     ])
-    llm = ChatOpenAI(model="gpt-4-1106-preview") # type: ignore
+    llm = ChatOpenAI(model="gpt-4-turbo") # type: ignore
 
     queries_chain = queries_prompt | llm | CommaSeparatedListOutputParser()
 
diff --git a/workers/fund_public_goods/lib/strategy/utils/get_top_matching_projects.py b/workers/fund_public_goods/lib/strategy/utils/get_top_matching_projects.py
index c94c9b8..030e395 100644
--- a/workers/fund_public_goods/lib/strategy/utils/get_top_matching_projects.py
+++ b/workers/fund_public_goods/lib/strategy/utils/get_top_matching_projects.py
@@ -46,7 +46,7 @@ def rerank_top_projects(prompt: str, projects: list[Projects]) -> list[Projects]
     formatted_prompt = reranking_prompt_template.format(prompt=prompt, separator=separator, projects=formatted_projects)
 
     response = openai.chat.completions.create(
-        model="gpt-4-1106-preview",
+        model="gpt-4-turbo",
         response_format={"type": "json_object"},
         messages=[
             {"role": "user", "content": formatted_prompt}
diff --git a/workers/fund_public_goods/lib/strategy/utils/score_projects_impact_funding.py b/workers/fund_public_goods/lib/strategy/utils/score_projects_impact_funding.py
index 78c4ad9..3abadc2 100644
--- a/workers/fund_public_goods/lib/strategy/utils/score_projects_impact_funding.py
+++ b/workers/fund_public_goods/lib/strategy/utils/score_projects_impact_funding.py
@@ -42,7 +42,7 @@ def score_projects_impact_funding(projects_with_report: list[Projects]) -> list[
         ("system", score_projects_prompt_template),
     ])
 
-    llm = ChatOpenAI(model="gpt-4-1106-preview", temperature=0, model_kwargs={'seed': 10}) # type: ignore
+    llm = ChatOpenAI(model="gpt-4-turbo", temperature=0, model_kwargs={'seed': 10}) # type: ignore
 
     scoring_chain = score_projects_prompt | llm | JsonOutputParser()
 
diff --git a/workers/fund_public_goods/lib/strategy/utils/score_projects_relevancy.py b/workers/fund_public_goods/lib/strategy/utils/score_projects_relevancy.py
index c99de1f..9708e84 100644
--- a/workers/fund_public_goods/lib/strategy/utils/score_projects_relevancy.py
+++ b/workers/fund_public_goods/lib/strategy/utils/score_projects_relevancy.py
@@ -41,7 +41,7 @@ def score_projects_relevancy(projects_with_report: list[tuple[Projects, str]], p
         ("system", score_projects_relevancy_prompt_template),
     ])
 
-    llm = ChatOpenAI(model="gpt-4-1106-preview", temperature=0, model_kwargs={'seed': 10}) # type: ignore
+    llm = ChatOpenAI(model="gpt-4-turbo", temperature=0, model_kwargs={'seed': 10}) # type: ignore
 
     scoring_chain = score_projects_relevancy_prompt | llm | JsonOutputParser()
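
The diff only swaps the model string, so the prompt | llm | parser pattern is unchanged. Below is a minimal standalone sketch (not part of the PR) showing how one of the updated chains can be exercised against the "gpt-4-turbo" alias; the import paths assume the langchain-openai and langchain-core packages, and the system prompt is a hypothetical stand-in for queries_prompt_template.

from langchain_core.output_parsers import CommaSeparatedListOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Hypothetical prompt standing in for queries_prompt_template in generate_queries.py.
queries_prompt = ChatPromptTemplate.from_messages([
    ("system", "Generate {n} comma-separated search queries for: {prompt}"),
])

# Same construction as the patched code, now pointing at the gpt-4-turbo alias.
llm = ChatOpenAI(model="gpt-4-turbo", temperature=0)  # type: ignore
queries_chain = queries_prompt | llm | CommaSeparatedListOutputParser()

if __name__ == "__main__":
    # Requires OPENAI_API_KEY in the environment; returns a list[str] of queries.
    print(queries_chain.invoke({"n": 3, "prompt": "public goods funding"}))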