9 changes: 4 additions & 5 deletions industries/healthcare/agentic-healthcare-front-desk/README.md
@@ -41,15 +41,14 @@ The agentic tool calling capability in each of the customer care assistants is p

## Prerequisites
### Hardware
-There are no local GPU requirements for running any application in this repo. The LLMs utilized in LangGraph in this repo are by default set to calling NVIDIA AI Endpoints, as seen in the directory [`graph_definitions/`](./graph_definitions/), and require a valid NVIDIA API KEY.
+There are no local GPU requirements for running any application in this repo. By default, the LLMs used in LangGraph in this repo call NVIDIA AI Endpoints, since `BASE_URL` is set to the default value of `"https://integrate.api.nvidia.com/v1"` in [vars.env](./vars.env), and they require a valid NVIDIA API KEY. As seen in the [graph definitions](./graph_definitions/):
```python
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm_model = "meta/llama-3.1-70b-instruct"
-assistant_llm = ChatNVIDIA(model=llm_model)
+assistant_llm = ChatNVIDIA(model=llm_model, ...)
```
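Spelled out, with the environment variables loaded from [vars.env](./vars.env), the instantiation looks roughly like this (a sketch, assuming python-dotenv is used to load the file, as the `env_var_file` setting in the graph definitions suggests):
```python
import os

from dotenv import load_dotenv
from langchain_nvidia_ai_endpoints import ChatNVIDIA

load_dotenv("vars.env")  # loads BASE_URL, LLM_MODEL, NVIDIA_API_KEY, ... into the environment

assistant_llm = ChatNVIDIA(
    model=os.getenv("LLM_MODEL"),    # e.g. "meta/llama-3.3-70b-instruct"
    base_url=os.getenv("BASE_URL"),  # hosted endpoint or your own NIM instance
)
```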
-You can experiment with other LLMs available on build.nvidia.com by changing the `model` param for `ChatNVIDIA` in the Python files in the directory [`graph_definitions/`](./graph_definitions/).
+You can experiment with other LLMs available on build.nvidia.com by changing the `LLM_MODEL` value in [vars.env](./vars.env), which is passed into `ChatNVIDIA` in the Python files in the directory [`graph_definitions/`](./graph_definitions/). See the example below.
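For example, to try a different chat model, only the env file needs to change (the model name below is illustrative):
```
# assumption: any chat model listed on build.nvidia.com should work here
LLM_MODEL="mistralai/mixtral-8x22b-instruct-v0.1"
```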

-If instead of calling NVIDIA AI Endpoints with an API key, you would like to host your own LLM NIM instance, please refer to the [Docker tab of the LLM NIM](https://build.nvidia.com/meta/llama-3_1-70b-instruct?snippet_tab=Docker) on how to host, and add a [`base_url` parameter](https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/#working-with-nvidia-nims) to point to your own instance when specifying `ChatNVIDIA` in the Python files in the directory [`graph_definitions/`](./graph_definitions/). For the hardware configuration of self hosting the LLM, please refer to the [documentation for LLM support matrix](https://docs.nvidia.com/nim/large-language-models/latest/support-matrix.html).
+If, instead of calling NVIDIA AI Endpoints with an API key, you would like to host your own LLM NIM instance, please refer to the [Docker tab of the LLM NIM](https://build.nvidia.com/meta/llama-3_1-70b-instruct?snippet_tab=Docker) for how to host it, and change the `BASE_URL` value in [vars.env](./vars.env) to [point to your own instance](https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/#working-with-nvidia-nims), which is used when specifying `ChatNVIDIA` in the Python files in the directory [`graph_definitions/`](./graph_definitions/). For the hardware requirements of self-hosting the LLM, please refer to the [LLM support matrix documentation](https://docs.nvidia.com/nim/large-language-models/latest/support-matrix.html).
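As a sketch, if a NIM container serves the same model on the local machine (the host and port below are assumptions; NIM Docker examples typically publish port 8000, but adjust to your deployment), [vars.env](./vars.env) would become:
```
# vars.env pointing at a self-hosted NIM instead of the hosted endpoint
BASE_URL="http://0.0.0.0:8000/v1"
LLM_MODEL="meta/llama-3.1-70b-instruct"
```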

### NVIDIA API KEY
You will need an NVIDIA API KEY to call NVIDIA AI Endpoints. The same API key works across the different model endpoints, so even if you change the LLM specification in `ChatNVIDIA(model=llm_model)`, you can still use the same API KEY.
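A minimal sketch of that point, assuming `NVIDIA_API_KEY` is exported in your environment (the model names are just examples from build.nvidia.com):
```python
from langchain_nvidia_ai_endpoints import ChatNVIDIA

# Both clients pick up the same NVIDIA_API_KEY from the environment.
llama = ChatNVIDIA(model="meta/llama-3.1-70b-instruct")
mixtral = ChatNVIDIA(model="mistralai/mixtral-8x22b-instruct-v0.1")
```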
[diff of a Python graph definition in `graph_definitions/`; filename not captured in this view]
@@ -49,8 +49,7 @@
#################
patient_id = '14867dba-fb11-4df3-9829-8e8e081b39e6' # test patient id from looking through https://launch.smarthealthit.org/
save_graph_to_png = True
-main_llm_model = "meta/llama-3.1-70b-instruct"
-specialized_llm_model = "meta/llama-3.1-70b-instruct"

env_var_file = "vars.env"

local_file_constant = "sample_db/test_db.sqlite"
@@ -73,9 +72,19 @@
RIVA_ASR_FUNCTION_ID = os.getenv("RIVA_ASR_FUNCTION_ID", None)
RIVA_TTS_FUNCTION_ID = os.getenv("RIVA_TTS_FUNCTION_ID", None)


+assert os.environ['LLM_MODEL'] is not None, "Make sure you have your LLM_MODEL exported as an environment variable!"
+main_llm_model = os.getenv("LLM_MODEL", None)
+
+assert os.environ['LLM_MODEL'] is not None, "Make sure you have your LLM_MODEL exported as an environment variable!"
+specialized_llm_model = os.getenv("LLM_MODEL", None)
+
+assert os.environ['BASE_URL'] is not None, "Make sure you have your BASE_URL exported as an environment variable!"
+base_url = os.getenv("BASE_URL", None)

### define which llm to use
-main_assistant_llm = ChatNVIDIA(model=main_llm_model)#, base_url=base_url
-specialized_assistant_llm = ChatNVIDIA(model=specialized_llm_model)#, base_url=base_url
+main_assistant_llm = ChatNVIDIA(model=main_llm_model, base_url=base_url)
+specialized_assistant_llm = ChatNVIDIA(model=specialized_llm_model, base_url=base_url)

def update_dialog_stack(left: list[str], right: Optional[str]) -> list[str]:
"""Push or pop the state."""
@@ -247,7 +256,7 @@ def print_gathered_patient_info(
patient_dob: datetime.date,
allergies_medication: List[str],
current_symptoms: str,
-current_symptoms_duration: datetime.timedelta,
+current_symptoms_duration: str,
pharmacy_location: str
):
"""This function prints out and transmits the gathered information for each patient intake field:
@@ -373,7 +382,7 @@ class ToPatientIntakeAssistant(BaseModel):
patient_dob: datetime.date = Field(description="The patient's date of birth.")
allergies_medication: List[str] = Field(description="A list of allergies in medication for the patient.")
current_symptoms: str = Field(description="A description of the current symptoms for the patient.")
-current_symptoms_duration: datetime.timedelta = Field(description="The time duration of current symptoms.")
+current_symptoms_duration: str = Field(description="The time duration of current symptoms.")
pharmacy_location: str = Field(description="The patient's pharmacy location.")
request: str = Field(
description="Any necessary information the patient intake assistant should clarify before proceeding."
[diff of another Python graph definition in `graph_definitions/`; filename not captured in this view]
@@ -42,7 +42,6 @@
patient_id = '14867dba-fb11-4df3-9829-8e8e081b39e6' # test patient id from looking through https://launch.smarthealthit.org/
save_graph_to_png = True

-llm_model = "meta/llama-3.1-70b-instruct"
env_var_file = "vars.env"

local_file_constant = "sample_db/test_db.sqlite"
@@ -63,8 +62,14 @@
RIVA_ASR_FUNCTION_ID = os.getenv("RIVA_ASR_FUNCTION_ID", None)
RIVA_TTS_FUNCTION_ID = os.getenv("RIVA_TTS_FUNCTION_ID", None)

+assert os.environ['LLM_MODEL'] is not None, "Make sure you have your LLM_MODEL exported as an environment variable!"
+llm_model = os.getenv("LLM_MODEL", None)
+
+assert os.environ['BASE_URL'] is not None, "Make sure you have your BASE_URL exported as an environment variable!"
+base_url = os.getenv("BASE_URL", None)

### define which llm to use
-assistant_llm = ChatNVIDIA(model=llm_model) # base_url=base_url)
+assistant_llm = ChatNVIDIA(model=llm_model, base_url=base_url)

########################
### Define the tools ###
Expand Down
[diff of another Python graph definition in `graph_definitions/`; filename not captured in this view]
@@ -49,7 +49,6 @@
#################
patient_id = '14867dba-fb11-4df3-9829-8e8e081b39e6' # test patient id from looking through https://launch.smarthealthit.org/
save_graph_to_png = True
-llm_model = "meta/llama-3.1-70b-instruct"
env_var_file = "vars.env"


@@ -62,14 +61,20 @@

assert os.environ['NVIDIA_API_KEY'] is not None, "Make sure you have your NVIDIA_API_KEY exported as an environment variable!"
assert os.environ['TAVILY_API_KEY'] is not None, "Make sure you have your TAVILY_API_KEY exported as an environment variable!"

NVIDIA_API_KEY=os.getenv("NVIDIA_API_KEY", None)
RIVA_API_URI = os.getenv("RIVA_API_URI", None)

RIVA_ASR_FUNCTION_ID = os.getenv("RIVA_ASR_FUNCTION_ID", None)
RIVA_TTS_FUNCTION_ID = os.getenv("RIVA_TTS_FUNCTION_ID", None)

+assert os.environ['LLM_MODEL'] is not None, "Make sure you have your LLM_MODEL exported as an environment variable!"
+llm_model = os.getenv("LLM_MODEL", None)
+
+assert os.environ['BASE_URL'] is not None, "Make sure you have your BASE_URL exported as an environment variable!"
+base_url = os.getenv("BASE_URL", None)

### define which llm to use
-assistant_llm = ChatNVIDIA(model=llm_model)#, base_url=base_url
+assistant_llm = ChatNVIDIA(model=llm_model, base_url=base_url)

########################
### Define the tools ###
Expand Down
[diff of another Python graph definition in `graph_definitions/`; filename not captured in this view]
@@ -37,7 +37,6 @@
patient_id = '14867dba-fb11-4df3-9829-8e8e081b39e6' # test patient id from looking through https://launch.smarthealthit.org/
save_graph_to_png = True

-llm_model = "meta/llama-3.1-70b-instruct"
env_var_file = "vars.env"


@@ -48,14 +47,20 @@
print("Your NVIDIA_API_KEY is set to: ", os.environ['NVIDIA_API_KEY'])

assert os.environ['NVIDIA_API_KEY'] is not None, "Make sure you have your NVIDIA_API_KEY exported as an environment variable!"

NVIDIA_API_KEY=os.getenv("NVIDIA_API_KEY", None)

RIVA_API_URI = os.getenv("RIVA_API_URI", None)
RIVA_ASR_FUNCTION_ID = os.getenv("RIVA_ASR_FUNCTION_ID", None)
RIVA_TTS_FUNCTION_ID = os.getenv("RIVA_TTS_FUNCTION_ID", None)

+assert os.environ['LLM_MODEL'] is not None, "Make sure you have your LLM_MODEL exported as an environment variable!"
+llm_model = os.getenv("LLM_MODEL", None)
+
+assert os.environ['BASE_URL'] is not None, "Make sure you have your BASE_URL exported as an environment variable!"
+base_url = os.getenv("BASE_URL", None)

### define which llm to use
-assistant_llm = ChatNVIDIA(model=llm_model) # base_url=base_url
+assistant_llm = ChatNVIDIA(model=llm_model, base_url=base_url)

########################
### Define the tools ###
@@ -73,7 +78,7 @@ def print_gathered_patient_info(
patient_dob: datetime.date,
allergies_medication: List[str],
current_symptoms: str,
-current_symptoms_duration: datetime.timedelta,
+current_symptoms_duration: str,
pharmacy_location: str
):
"""This function prints out and transmits the gathered information for each patient intake field:
4 changes: 3 additions & 1 deletion industries/healthcare/agentic-healthcare-front-desk/vars.env
@@ -2,4 +2,6 @@ NVIDIA_API_KEY="nvapi-"
TAVILY_API_KEY="tvly-"
RIVA_API_URI=grpc.nvcf.nvidia.com:443
RIVA_ASR_FUNCTION_ID=1598d209-5e27-4d3c-8079-4751568b1081
-RIVA_TTS_FUNCTION_ID=0149dedb-2be8-4195-b9a0-e57e0e14f972
+RIVA_TTS_FUNCTION_ID=0149dedb-2be8-4195-b9a0-e57e0e14f972
+BASE_URL="https://integrate.api.nvidia.com/v1"
+LLM_MODEL="meta/llama-3.3-70b-instruct"