7B-chat-hf
wu-yy committed Jul 20, 2023
1 parent 6b115d2 commit c787b05
Showing 4 changed files with 337 additions and 0 deletions.
7 changes: 7 additions & 0 deletions examples/hf-llama2-7b-gradio/READMD.md
@@ -0,0 +1,7 @@
1. Specify the model path
Change `model_id` in model.py to your local model path.
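For example (hypothetical path; substitute your own local directory):
```
model_id = '/data/models/Llama-2-7b-chat-hf'
```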

2. Run
```
bash start.sh
```
267 changes: 267 additions & 0 deletions examples/hf-llama2-7b-gradio/app.py
@@ -0,0 +1,267 @@
from typing import Iterator

import gradio as gr
import torch

from model import run

DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
"""
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024

DESCRIPTION = """
# Llama-2 7B Chat
This Space demonstrates the [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) model by Meta, a Llama 2 model with 7 billion parameters fine-tuned for chat instructions. If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
🔎 For more details about the Llama 2 family of models and how to use them with Transformers, take a look at [our blog post](https://huggingface.co/blog/llama2).
🔨 If you prefer a more capable model, try the [13B version](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat) or the larger [70B model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
😊 Feel free to star ✨ the [Chinese community](https://github.com/FlagAlpha/Llama2-Chinese)
"""

LICENSE = """
<p/>
---
As a derivative work of [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta,
this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md).
"""

if not torch.cuda.is_available():
DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'


def clear_and_save_textbox(message: str) -> tuple[str, str]:
return '', message


def display_input(message: str,
history: list[tuple[str, str]]) -> list[tuple[str, str]]:
history.append((message, ''))
return history


def delete_prev_fn(
history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
try:
message, _ = history.pop()
except IndexError:
message = ''
return history, message or ''


def generate(
message: str,
history_with_input: list[tuple[str, str]],
system_prompt: str,
max_new_tokens: int,
    temperature: float,
    top_p: float,
top_k: int,
) -> Iterator[list[tuple[str, str]]]:
if max_new_tokens > MAX_MAX_NEW_TOKENS:
        raise ValueError(f'max_new_tokens must not exceed {MAX_MAX_NEW_TOKENS}')

history = history_with_input[:-1]
generator = run(message, history, system_prompt, max_new_tokens,
temperature, top_p, top_k)
try:
first_response = next(generator)
yield history + [(message, first_response)]
except StopIteration:
yield history + [(message, '')]
for response in generator:
yield history + [(message, response)]


def process_example(message: str) -> tuple[str, list[tuple[str, str]]]:
    generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 1024, 1, 0.95,
                         1000)
for x in generator:
pass
return '', x


with gr.Blocks(css='style.css') as demo:
gr.Markdown(DESCRIPTION)
gr.DuplicateButton(value='Duplicate Space for private use',
elem_id='duplicate-button')

with gr.Group():
chatbot = gr.Chatbot(label='Chatbot')
with gr.Row():
textbox = gr.Textbox(
container=False,
show_label=False,
placeholder='Type a message...',
scale=10,
)
submit_button = gr.Button('Submit',
variant='primary',
scale=1,
min_width=0)
with gr.Row():
retry_button = gr.Button('🔄 Retry', variant='secondary')
undo_button = gr.Button('↩️ Undo', variant='secondary')
clear_button = gr.Button('🗑️ Clear', variant='secondary')

saved_input = gr.State()

with gr.Accordion(label='Advanced options', open=False):
system_prompt = gr.Textbox(label='System prompt',
value=DEFAULT_SYSTEM_PROMPT,
lines=6)
max_new_tokens = gr.Slider(
label='Max new tokens',
minimum=1,
maximum=MAX_MAX_NEW_TOKENS,
step=1,
value=DEFAULT_MAX_NEW_TOKENS,
)
temperature = gr.Slider(
label='Temperature',
minimum=0.1,
maximum=4.0,
step=0.1,
value=1.0,
)
top_p = gr.Slider(
label='Top-p (nucleus sampling)',
minimum=0.05,
maximum=1.0,
step=0.05,
value=0.95,
)
top_k = gr.Slider(
label='Top-k',
minimum=1,
maximum=1000,
step=1,
value=50,
)

gr.Examples(
examples=[
'Hello there! How are you doing?',
'Can you explain briefly to me what is the Python programming language?',
'Explain the plot of Cinderella in a sentence.',
'How many hours does it take a man to eat a Helicopter?',
"Write a 100-word article on 'Benefits of Open-Source in AI research'",
],
inputs=textbox,
outputs=[textbox, chatbot],
fn=process_example,
cache_examples=True,
)

gr.Markdown(LICENSE)

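    # Submit flow: (1) clear the textbox and stash the message, (2) echo the
    # user message into the chat history, (3) stream the model response.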
textbox.submit(
fn=clear_and_save_textbox,
inputs=textbox,
outputs=[textbox, saved_input],
api_name=False,
queue=False,
).then(
fn=display_input,
inputs=[saved_input, chatbot],
outputs=chatbot,
api_name=False,
queue=False,
).then(
fn=generate,
inputs=[
saved_input,
chatbot,
system_prompt,
max_new_tokens,
temperature,
top_p,
top_k,
],
outputs=chatbot,
api_name=False,
)

button_event_preprocess = submit_button.click(
fn=clear_and_save_textbox,
inputs=textbox,
outputs=[textbox, saved_input],
api_name=False,
queue=False,
).then(
fn=display_input,
inputs=[saved_input, chatbot],
outputs=chatbot,
api_name=False,
queue=False,
).then(
fn=generate,
inputs=[
saved_input,
chatbot,
system_prompt,
max_new_tokens,
temperature,
top_p,
top_k,
],
outputs=chatbot,
api_name=False,
)

retry_button.click(
fn=delete_prev_fn,
inputs=chatbot,
outputs=[chatbot, saved_input],
api_name=False,
queue=False,
).then(
fn=display_input,
inputs=[saved_input, chatbot],
outputs=chatbot,
api_name=False,
queue=False,
).then(
fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
outputs=chatbot,
api_name=False,
)

undo_button.click(
fn=delete_prev_fn,
inputs=chatbot,
outputs=[chatbot, saved_input],
api_name=False,
queue=False,
).then(
fn=lambda x: x,
inputs=[saved_input],
outputs=textbox,
api_name=False,
queue=False,
)

clear_button.click(
fn=lambda: ([], ''),
outputs=[chatbot, saved_input],
queue=False,
api_name=False,
)

demo.queue(max_size=20).launch(server_name="0.0.0.0", server_port=40007)

61 changes: 61 additions & 0 deletions examples/hf-llama2-7b-gradio/model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
from threading import Thread
from typing import Iterator

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = '/path/models--meta-llama--Llama-2-7b-chat-hf'
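# ^ Placeholder path; point this at your local copy of the checkpoint.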

if torch.cuda.is_available():
model = AutoModelForCausalLM.from_pretrained(
model_id,
local_files_only=True,
torch_dtype=torch.float16,
device_map='auto',
load_in_8bit=True
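        # 8-bit loading requires the bitsandbytes package and a CUDA GPU.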
)
else:
model = None
tokenizer = AutoTokenizer.from_pretrained(model_id)


def get_prompt(message: str, chat_history: list[tuple[str, str]],
system_prompt: str) -> str:
texts = [f'[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n']
for user_input, response in chat_history:
texts.append(f'{user_input.strip()} [/INST] {response.strip()} </s><s> [INST] ')
texts.append(f'{message.strip()} [/INST]')
return ''.join(texts)

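# For example (sketch), calling get_prompt('How are you?',
# [('Hi', 'Hello!')], system_prompt) returns:
#   [INST] <<SYS>>
#   {system_prompt}
#   <</SYS>>
#
#   Hi [/INST] Hello! </s><s> [INST] How are you? [/INST]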

def run(message: str,
chat_history: list[tuple[str, str]],
system_prompt: str,
max_new_tokens: int = 1024,
temperature: float = 0.8,
top_p: float = 0.95,
top_k: int = 50) -> Iterator[str]:
prompt = get_prompt(message, chat_history, system_prompt)
    inputs = tokenizer([prompt], return_tensors='pt').to('cuda')

streamer = TextIteratorStreamer(tokenizer,
timeout=10.,
skip_prompt=True,
skip_special_tokens=True)
generate_kwargs = dict(
inputs,
streamer=streamer,
max_new_tokens=max_new_tokens,
do_sample=True,
top_p=top_p,
top_k=top_k,
temperature=temperature,
num_beams=1,
)
t = Thread(target=model.generate, kwargs=generate_kwargs)
t.start()

outputs = []
for text in streamer:
outputs.append(text)
yield ''.join(outputs)
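

# Minimal usage sketch (assumes model_id above points at a valid local
# checkpoint and a CUDA device is available):
#
#   for partial in run('What is Python?', chat_history=[], system_prompt=''):
#       print(partial)  # each yield is the full response generated so far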
2 changes: 2 additions & 0 deletions examples/hf-llama2-7b-gradio/start.sh
@@ -0,0 +1,2 @@
# Run the Llama 2 Gradio demo
CUDA_VISIBLE_DEVICES=0 python app.py
