Uncaught (in promise) DOMException: Failed to execute 'insertBefore' on 'Node': The node before which the new node is to be inserted is not a child of this node.
#8288 · Closed
lenartgolob opened this issue on May 14, 2024 · 4 comments
Describe the bug
I am running PrivateGPT (https://github.com/zylon-ai/private-gpt) and eventually I always get this error:
Uncaught (in promise) DOMException: Failed to execute 'insertBefore' on 'Node': The node before which the new node is to be inserted is not a child of this node.
The backend still works, but the UI freezes and stops responding until I refresh the page.
I tried changing the gradio version to 4.20.1, 3.40, and 3.41, but none of these versions fixed it; I still get the same error.
What else can I try to resolve this issue?
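One thing I could try next is a stripped-down streaming chat without the PrivateGPT layout and custom CSS, to check whether the insertBefore error comes from gradio itself or from the surrounding UI. This is only a minimal sketch (the echo function is illustrative, not taken from PrivateGPT):

import time

import gradio as gr


def echo_stream(message, history):
    # Stream the reply chunk by chunk, similar to PrivateGPT's yield_deltas
    partial = ""
    for ch in message:
        partial += ch
        time.sleep(0.02)
        yield partial


demo = gr.ChatInterface(
    echo_stream,
    chatbot=gr.Chatbot(elem_id="chatbot", render=False),
)

if __name__ == "__main__":
    demo.queue()
    demo.launch()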
Have you searched existing issues? 🔎
I have searched and found no existing issues
Reproduction
"""This file should be imported if and only if you want to run the UI locally."""importitertoolsimportloggingimporttimefromcollections.abcimportIterablefrompathlibimportPathfromtypingimportAnyimportgradioasgr# type: ignorefromfastapiimportFastAPIfromgradio.themes.utils.colorsimportslate# type: ignorefrominjectorimportinject, singletonfromllama_index.core.llmsimportChatMessage, ChatResponse, MessageRolefrompydanticimportBaseModelfromprivate_gpt.constantsimportPROJECT_ROOT_PATHfromprivate_gpt.diimportglobal_injectorfromprivate_gpt.open_ai.extensions.context_filterimportContextFilterfromprivate_gpt.server.chat.chat_serviceimportChatService, CompletionGenfromprivate_gpt.server.chunks.chunks_serviceimportChunk, ChunksServicefromprivate_gpt.server.ingest.ingest_serviceimportIngestServicefromprivate_gpt.settings.settingsimportsettingsfromprivate_gpt.ui.imagesimportlogo_svglogger=logging.getLogger(__name__)
THIS_DIRECTORY_RELATIVE=Path(__file__).parent.relative_to(PROJECT_ROOT_PATH)
# Should be "private_gpt/ui/avatar-bot.ico"AVATAR_BOT=THIS_DIRECTORY_RELATIVE/"avatar-bot.ico"UI_TAB_TITLE="My Private GPT"SOURCES_SEPARATOR="\n\n Sources: \n"MODES= ["Query Files", "Search Files", "LLM Chat (no context from files)"]
classSource(BaseModel):
file: strpage: strtext: strclassConfig:
frozen=True@staticmethoddefcurate_sources(sources: list[Chunk]) ->list["Source"]:
curated_sources= []
forchunkinsources:
doc_metadata=chunk.document.doc_metadatafile_name=doc_metadata.get("file_name", "-") ifdoc_metadataelse"-"page_label=doc_metadata.get("page_label", "-") ifdoc_metadataelse"-"source=Source(file=file_name, page=page_label, text=chunk.text)
curated_sources.append(source)
curated_sources=list(
dict.fromkeys(curated_sources).keys()
) # Unique sources onlyreturncurated_sources@singletonclassPrivateGptUi:
@injectdef__init__(
self,
ingest_service: IngestService,
chat_service: ChatService,
chunks_service: ChunksService,
) ->None:
self._ingest_service=ingest_serviceself._chat_service=chat_serviceself._chunks_service=chunks_service# Cache the UI blocksself._ui_block=Noneself._selected_filename=None# Initialize system prompt based on default modeself.mode=MODES[0]
self._system_prompt=self._get_default_system_prompt(self.mode)
def_chat(self, message: str, history: list[list[str]], mode: str, *_: Any) ->Any:
defyield_deltas(completion_gen: CompletionGen) ->Iterable[str]:
full_response: str=""stream=completion_gen.responsefordeltainstream:
ifisinstance(delta, str):
full_response+=str(delta)
elifisinstance(delta, ChatResponse):
full_response+=delta.deltaor""yieldfull_responsetime.sleep(0.02)
ifcompletion_gen.sources:
full_response+=SOURCES_SEPARATORcur_sources=Source.curate_sources(completion_gen.sources)
sources_text="\n\n\n"used_files=set()
forindex, sourceinenumerate(cur_sources, start=1):
iff"{source.file}-{source.page}"notinused_files:
sources_text= (
sources_text+f"{index}. {source.file} (page {source.page}) \n\n"
)
used_files.add(f"{source.file}-{source.page}")
full_response+=sources_textyieldfull_responsedefbuild_history() ->list[ChatMessage]:
history_messages: list[ChatMessage] =list(
itertools.chain(
*[
[
ChatMessage(content=interaction[0], role=MessageRole.USER),
ChatMessage(
# Remove from history content the Sources informationcontent=interaction[1].split(SOURCES_SEPARATOR)[0],
role=MessageRole.ASSISTANT,
),
]
forinteractioninhistory
]
)
)
# max 20 messages to try to avoid context overflowreturnhistory_messages[:20]
new_message=ChatMessage(content=message, role=MessageRole.USER)
all_messages= [*build_history(), new_message]
# If a system prompt is set, add it as a system messageifself._system_prompt:
all_messages.insert(
0,
ChatMessage(
content=self._system_prompt,
role=MessageRole.SYSTEM,
),
)
matchmode:
case"Query Files":
# Use only the selected file for the querycontext_filter=Noneifself._selected_filenameisnotNone:
docs_ids= []
foringested_documentinself._ingest_service.list_ingested():
if (
ingested_document.doc_metadata["file_name"]
==self._selected_filename
):
docs_ids.append(ingested_document.doc_id)
context_filter=ContextFilter(docs_ids=docs_ids)
query_stream=self._chat_service.stream_chat(
messages=all_messages,
use_context=True,
context_filter=context_filter,
)
yieldfromyield_deltas(query_stream)
case"LLM Chat (no context from files)":
llm_stream=self._chat_service.stream_chat(
messages=all_messages,
use_context=False,
)
yieldfromyield_deltas(llm_stream)
case"Search Files":
response=self._chunks_service.retrieve_relevant(
text=message, limit=4, prev_next_chunks=0
)
sources=Source.curate_sources(response)
yield"\n\n\n".join(
f"{index}. **{source.file} "f"(page {source.page})**\n "f"{source.text}"forindex, sourceinenumerate(sources, start=1)
)
# On initialization and on mode change, this function set the system prompt# to the default prompt based on the mode (and user settings).@staticmethoddef_get_default_system_prompt(mode: str) ->str:
p=""matchmode:
# For query chat mode, obtain default system prompt from settingscase"Query Files":
p=settings().ui.default_query_system_prompt# For chat mode, obtain default system prompt from settingscase"LLM Chat (no context from files)":
p=settings().ui.default_chat_system_prompt# For any other mode, clear the system promptcase_:
p=""returnpdef_set_system_prompt(self, system_prompt_input: str) ->None:
logger.info(f"Setting system prompt to: {system_prompt_input}")
self._system_prompt=system_prompt_inputdef_set_current_mode(self, mode: str) ->Any:
self.mode=modeself._set_system_prompt(self._get_default_system_prompt(mode))
# Update placeholder and allow interaction if default system prompt is setifself._system_prompt:
returngr.update(placeholder=self._system_prompt, interactive=True)
# Update placeholder and disable interaction if no default system prompt is setelse:
returngr.update(placeholder=self._system_prompt, interactive=False)
def_list_ingested_files(self) ->list[list[str]]:
files=set()
foringested_documentinself._ingest_service.list_ingested():
ifingested_document.doc_metadataisNone:
# Skipping documents without metadatacontinuefile_name=ingested_document.doc_metadata.get(
"file_name", "[FILE NAME MISSING]"
)
files.add(file_name)
return [[row] forrowinfiles]
def_upload_file(self, files: list[str]) ->None:
logger.debug("Loading count=%s files", len(files))
paths= [Path(file) forfileinfiles]
# remove all existing Documents with name identical to a new file upload:file_names= [path.nameforpathinpaths]
doc_ids_to_delete= []
foringested_documentinself._ingest_service.list_ingested():
if (
ingested_document.doc_metadataandingested_document.doc_metadata["file_name"] infile_names
):
doc_ids_to_delete.append(ingested_document.doc_id)
iflen(doc_ids_to_delete) >0:
logger.info(
"Uploading file(s) which were already ingested: %s document(s) will be replaced.",
len(doc_ids_to_delete),
)
fordoc_idindoc_ids_to_delete:
self._ingest_service.delete(doc_id)
self._ingest_service.bulk_ingest([(str(path.name), path) forpathinpaths])
def_delete_all_files(self) ->Any:
ingested_files=self._ingest_service.list_ingested()
logger.debug("Deleting count=%s files", len(ingested_files))
foringested_documentiningested_files:
self._ingest_service.delete(ingested_document.doc_id)
return [
gr.List(self._list_ingested_files()),
gr.components.Button(interactive=False),
gr.components.Button(interactive=False),
gr.components.Textbox("All files"),
]
def_delete_selected_file(self) ->Any:
logger.debug("Deleting selected %s", self._selected_filename)
# Note: keep looping for pdf's (each page became a Document)foringested_documentinself._ingest_service.list_ingested():
if (
ingested_document.doc_metadataandingested_document.doc_metadata["file_name"]
==self._selected_filename
):
self._ingest_service.delete(ingested_document.doc_id)
return [
gr.List(self._list_ingested_files()),
gr.components.Button(interactive=False),
gr.components.Button(interactive=False),
gr.components.Textbox("All files"),
]
def_deselect_selected_file(self) ->Any:
self._selected_filename=Nonereturn [
gr.components.Button(interactive=False),
gr.components.Button(interactive=False),
gr.components.Textbox("All files"),
]
def_selected_a_file(self, select_data: gr.SelectData) ->Any:
self._selected_filename=select_data.valuereturn [
gr.components.Button(interactive=True),
gr.components.Button(interactive=True),
gr.components.Textbox(self._selected_filename),
]
def_build_ui_blocks(self) ->gr.Blocks:
logger.debug("Creating the UI blocks")
withgr.Blocks(
title=UI_TAB_TITLE,
theme=gr.themes.Soft(primary_hue=slate),
css=".logo { ""display:flex;""background-color: #C7BAFF;""height: 80px;""border-radius: 8px;""align-content: center;""justify-content: center;""align-items: center;""}"".logo img { height: 25% }"".contain { display: flex !important; flex-direction: column !important; }""#component-0, #component-3, #component-10, #component-8 { height: 100% !important; }""#chatbot { flex-grow: 1 !important; overflow: auto !important;}""#col { height: calc(100vh - 112px - 16px) !important; }",
) asblocks:
withgr.Row():
gr.HTML(f"<div class='logo'/><img src={logo_svg} alt=PrivateGPT></div")
withgr.Row(equal_height=False):
withgr.Column(scale=3):
mode=gr.Radio(
MODES,
label="Mode",
value="Query Files",
)
upload_button=gr.components.UploadButton(
"Upload File(s)",
type="filepath",
file_count="multiple",
size="sm",
)
ingested_dataset=gr.List(
self._list_ingested_files,
headers=["File name"],
label="Ingested Files",
height=235,
interactive=False,
render=False, # Rendered under the button
)
upload_button.upload(
self._upload_file,
inputs=upload_button,
outputs=ingested_dataset,
)
ingested_dataset.change(
self._list_ingested_files,
outputs=ingested_dataset,
)
ingested_dataset.render()
deselect_file_button=gr.components.Button(
"De-select selected file", size="sm", interactive=False
)
selected_text=gr.components.Textbox(
"All files", label="Selected for Query or Deletion", max_lines=1
)
delete_file_button=gr.components.Button(
"🗑️ Delete selected file",
size="sm",
visible=settings().ui.delete_file_button_enabled,
interactive=False,
)
delete_files_button=gr.components.Button(
"⚠️ Delete ALL files",
size="sm",
visible=settings().ui.delete_all_files_button_enabled,
)
deselect_file_button.click(
self._deselect_selected_file,
outputs=[
delete_file_button,
deselect_file_button,
selected_text,
],
)
ingested_dataset.select(
fn=self._selected_a_file,
outputs=[
delete_file_button,
deselect_file_button,
selected_text,
],
)
delete_file_button.click(
self._delete_selected_file,
outputs=[
ingested_dataset,
delete_file_button,
deselect_file_button,
selected_text,
],
)
delete_files_button.click(
self._delete_all_files,
outputs=[
ingested_dataset,
delete_file_button,
deselect_file_button,
selected_text,
],
)
system_prompt_input=gr.Textbox(
placeholder=self._system_prompt,
label="System Prompt",
lines=2,
interactive=True,
render=False,
)
# When mode changes, set default system promptmode.change(
self._set_current_mode, inputs=mode, outputs=system_prompt_input
)
# On blur, set system prompt to use in queriessystem_prompt_input.blur(
self._set_system_prompt,
inputs=system_prompt_input,
)
defget_model_label() ->str|None:
"""Get model label from llm mode setting YAML. Raises: ValueError: If an invalid 'llm_mode' is encountered. Returns: str: The corresponding model label. """# Get model label from llm mode setting YAML# Labels: local, openai, openailike, sagemaker, mock, ollamaconfig_settings=settings()
ifconfig_settingsisNone:
raiseValueError("Settings are not configured.")
# Get llm_mode from settingsllm_mode=config_settings.llm.mode# Mapping of 'llm_mode' to corresponding model labelsmodel_mapping= {
"llamacpp": config_settings.llamacpp.llm_hf_model_file,
"openai": config_settings.openai.model,
"openailike": config_settings.openai.model,
"sagemaker": config_settings.sagemaker.llm_endpoint_name,
"mock": llm_mode,
"ollama": config_settings.ollama.llm_model,
}
ifllm_modenotinmodel_mapping:
print(f"Invalid 'llm mode': {llm_mode}")
returnNonereturnmodel_mapping[llm_mode]
withgr.Column(scale=7, elem_id="col"):
# Determine the model label based on the value of PGPT_PROFILESmodel_label=get_model_label()
ifmodel_labelisnotNone:
label_text= (
f"LLM: {settings().llm.mode} | Model: {model_label}"
)
else:
label_text=f"LLM: {settings().llm.mode}"_=gr.ChatInterface(
self._chat,
chatbot=gr.Chatbot(
label=label_text,
show_copy_button=True,
elem_id="chatbot",
render=False,
avatar_images=(
None,
AVATAR_BOT,
),
),
additional_inputs=[mode, upload_button, system_prompt_input],
)
returnblocksdefget_ui_blocks(self) ->gr.Blocks:
ifself._ui_blockisNone:
self._ui_block=self._build_ui_blocks()
returnself._ui_blockdefmount_in_app(self, app: FastAPI, path: str) ->None:
blocks=self.get_ui_blocks()
blocks.queue()
logger.info("Mounting the gradio UI, at path=%s", path)
gr.mount_gradio_app(app, blocks, path=path)
if__name__=="__main__":
ui=global_injector.get(PrivateGptUi)
_blocks=ui.get_ui_blocks()
_blocks.queue()
_blocks.launch(debug=False, show_api=False)
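When switching gradio versions, I also verify which version PrivateGPT actually imports at runtime, since the project manages its own dependencies; a quick sanity check from the same environment (illustrative only):

# Confirm which gradio version is active in the PrivateGPT environment
# before re-testing the UI (run from the same virtualenv / poetry shell).
import gradio

print(gradio.__version__)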
Screenshot
No response
Logs
No response
System Info
Severity
Blocking usage of gradio