Testing streaming outputs
Various versions of this PR are for testing streaming implementations of the HuggingFace model parsers.
Rossdan Craig <rossdan@lastmileai.dev> committed Jan 10, 2024
1 parent 322ed18 commit 11ace0a
Showing 4 changed files with 72 additions and 3 deletions.
31 changes: 31 additions & 0 deletions cookbooks/Gradio/hf_model_parsers.py
@@ -0,0 +1,31 @@
from aiconfig_extension_hugging_face import (
    HuggingFaceAutomaticSpeechRecognitionTransformer,
    HuggingFaceImage2TextTransformer,
    HuggingFaceTextSummarizationTransformer,
    HuggingFaceText2ImageDiffusor,
    HuggingFaceText2SpeechTransformer,
    HuggingFaceTextGenerationTransformer,
    HuggingFaceTextTranslationTransformer,
)
from aiconfig import AIConfigRuntime, ModelParserRegistry


def register_model_parsers() -> None:
    """Register model parsers for HuggingFace models."""
    # Audio --> Text
    # AIConfigRuntime.register_model_parser(HuggingFaceAutomaticSpeechRecognitionTransformer(), "AutomaticSpeechRecognition")

    # # Image --> Text
    # AIConfigRuntime.register_model_parser(HuggingFaceImage2TextTransformer(), "Image2Text")

    # # Text --> Image
    # AIConfigRuntime.register_model_parser(HuggingFaceText2ImageDiffusor(), "Text2Image")

    # # Text --> Audio
    # AIConfigRuntime.register_model_parser(HuggingFaceText2SpeechTransformer(), "Text2Speech")

    # # Text --> Text
    # AIConfigRuntime.register_model_parser(HuggingFaceTextGenerationTransformer(), "TextGeneration")
    # AIConfigRuntime.register_model_parser(HuggingFaceTextSummarizationTransformer(), "TextSummarization")
    ModelParserRegistry.register_model_parser(HuggingFaceTextSummarizationTransformer())
    # AIConfigRuntime.register_model_parser(HuggingFaceTextTranslationTransformer(), "TextTranslation")
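
For context, here is a minimal sketch of how this registration function might be wired up in the Gradio cookbook. The local import path and the use of the prompt name are assumptions based on the other files in this commit, not code taken from the PR.

import asyncio

from aiconfig import AIConfigRuntime

# Hypothetical local import: the function defined in cookbooks/Gradio/hf_model_parsers.py above
from hf_model_parsers import register_model_parsers

# Register parsers before loading the config so the
# "HuggingFaceTextSummarizationTransformer" reference in the config's model_parsers can resolve.
register_model_parsers()

config = AIConfigRuntime.load("huggingface.aiconfig.json")
result = asyncio.run(config.run("Summarize a story"))
print(result)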
38 changes: 38 additions & 0 deletions cookbooks/Gradio/huggingface.aiconfig.json
@@ -0,0 +1,38 @@
{
  "name": "The Tale of the Quick Brown Fox",
  "schema_version": "latest",
  "metadata": {
    "parameters": {},
    "models": {
      "facebook/bart-large-cnn": {
        "model": "facebook/bart-large-cnn"
      }
    },
    "default_model": "TextGeneration",
    "model_parsers": {
      "facebook/bart-large-cnn": "HuggingFaceTextSummarizationTransformer"
    }
  },
  "description": "The Tale of the Quick Brown Fox",
  "prompts": [
    {
      "name": "Summarize a story",
      "input": "Once upon a time, in a lush and vibrant forest, there lived a magnificent creature known as the Quick Brown Fox. This fox was unlike any other, possessing incredible speed and agility that awed all the animals in the forest. With its fur as golden as the sun and its eyes as sharp as emeralds, the Quick Brown Fox was admired by everyone, from the tiniest hummingbird to the mightiest bear. The fox had a kind heart and would often lend a helping paw to those in need. The Quick Brown Fox had a particular fondness for games and challenges. It loved to test its skills against others, always seeking new adventures to satisfy its boundless curiosity. Its favorite game was called \"The Great Word Hunt,\" where it would embark on a quest to find hidden words scattered across the forest. \n\nOne day it got very old and died",
      "metadata": {
        "model": {
          "name": "facebook/bart-large-cnn",
          "settings": {
            "max_length": 100,
            "min_length": 50,
            "num_beams": 1
          }
        },
        "parameters": {
          "city": "New York"
        }
      },
      "outputs": []
    }
  ],
  "$schema": "https://json.schemastore.org/aiconfig-1.0"
}
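
As a rough sketch of how streaming output from this config could be consumed: InferenceOptions and the three-argument stream_callback shape are taken from the aiconfig Python API as generally documented, so treat the exact signature as an assumption rather than something this commit defines.

import asyncio

from aiconfig import AIConfigRuntime, InferenceOptions


async def main() -> None:
    config = AIConfigRuntime.load("huggingface.aiconfig.json")

    # Assumed callback signature: (data, accumulated_data, index); each call
    # receives the newest chunk produced by the model parser's streamer.
    options = InferenceOptions(
        stream=True,
        stream_callback=lambda data, _acc, _idx: print(data, end="", flush=True),
    )

    await config.run("Summarize a story", options=options)


asyncio.run(main())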
@@ -128,7 +128,7 @@ def construct_stream_output(
"metadata": {},
}
)

accumulated_message = ""
for new_text in streamer:
if isinstance(new_text, str):
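
The hunk above is part of the streamed-generation loop; the rest of the parser is not shown here, but the pattern under test is the standard Hugging Face TextIteratorStreamer flow. Below is a minimal, self-contained sketch of that pattern, with the model name and generation settings borrowed from the aiconfig above and everything else assumed.

from threading import Thread

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")

inputs = tokenizer("Once upon a time, in a lush and vibrant forest...", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True)

# generate() blocks until completion, so it runs on a worker thread while the
# caller consumes decoded text chunks from the streamer as they become available.
thread = Thread(
    target=model.generate,
    kwargs={**inputs, "streamer": streamer, "max_length": 100, "num_beams": 1},
)
thread.start()

accumulated_message = ""
for new_text in streamer:
    accumulated_message += new_text  # same accumulation pattern as the diff above
    print(new_text, end="", flush=True)
thread.join()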
4 changes: 2 additions & 2 deletions python/src/aiconfig/editor/server/server.py
@@ -293,9 +293,9 @@ def kill_thread(thread_id: int | None):

# Yea I know time.sleep() isn't super accurate, but it's fine,
# we can fix later
time.sleep(0.1)
# time.sleep(0.1)
wait_time_in_seconds += SLEEP_DELAY_SECONDS
print(f"Output queue is currently empty. Waiting for {wait_time_in_seconds:.1f}s...")
# print(f"Output queue is currently empty. Waiting for {wait_time_in_seconds:.1f}s...")

# Yield in flask is weird and you either need to send responses as a
# string, or artificially wrap them around "[" and "]"
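
The commented-out sleep and print above live in the loop that drains the output queue for the streaming HTTP response. For readers puzzled by the remark about wrapping yielded strings in "[" and "]", here is a generic Flask sketch of that trick; it is illustrative only, with all names assumed, and is not the actual server.py implementation.

import json
from queue import Empty, Queue

from flask import Flask, Response

app = Flask(__name__)
output_queue: Queue = Queue()


@app.route("/run_stream")
def run_stream():
    def generate():
        # Each yielded chunk must be a string (or bytes), so individual JSON events
        # are serialized one by one and wrapped in "[" ... "]" so the full body
        # parses as a single JSON array on the client.
        yield "["
        first = True
        while True:
            try:
                event = output_queue.get(timeout=0.1)
            except Empty:
                continue
            if event is None:  # sentinel placed by the worker when generation finishes
                break
            if not first:
                yield ","
            yield json.dumps(event)
            first = False
        yield "]"

    return Response(generate(), mimetype="application/json")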
