Running demo.py starts up normally, but when I try to ask a question, something goes wrong:
Traceback (most recent call last):
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/gradio/routes.py", line 393, in run_predict
    output = await app.get_blocks().process_api(
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/gradio/blocks.py", line 1108, in process_api
    result = await self.call_function(
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/gradio/blocks.py", line 915, in call_function
    prediction = await anyio.to_thread.run_sync(
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/anyio/to_thread.py", line 31, in run_sync
    return await get_asynclib().run_sync_in_worker_thread(
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
    return await future
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 867, in run
    result = context.run(func, *args)
  File "/MiniGPT-4/demo.py", line 92, in gradio_answer
    llm_message = chat.answer(conv=chat_state, img_list=img_list, max_new_tokens=1000, num_beams=num_beams, temperature=temperature)[0]
  File "/MiniGPT-4/minigpt4/conversation/conversation.py", line 156, in answer
    output_text = self.model.llama_tokenizer.decode(output_token, add_special_tokens=False)
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/transformers/tokenization_utils_base.py", line 3486, in decode
    return self._decode(
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/transformers/tokenization_utils.py", line 931, in _decode
    filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/transformers/tokenization_utils.py", line 912, in convert_ids_to_tokens
    tokens.append(self._convert_id_to_token(index))
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/transformers/models/llama/tokenization_llama.py", line 129, in _convert_id_to_token
    token = self.sp_model.IdToPiece(index)
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/sentencepiece/__init__.py", line 1045, in _batched_func
    return _func(self, arg)
  File "/root/anaconda3/envs/minigpt4/lib/python3.9/site-packages/sentencepiece/__init__.py", line 1038, in _func
    raise IndexError('piece id is out of range.')
IndexError: piece id is out of range.
It looks like the tokenizer you are using is wrong. I suspect this is related to preparing the Vicuna weights correctly. I'm currently writing a guide for Vicuna preparation and will get back to you once it's finished.
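In the meantime, a quick sanity check is to confirm that the tokenizer shipped with your Vicuna checkpoint covers every token id the model can generate. The IndexError above is raised by SentencePiece when decode() reaches an id that the underlying sentencepiece model does not contain. Here is a minimal sketch of that check; the checkpoint path is a placeholder for wherever your prepared Vicuna weights live:

```python
# Hedged sanity check, not part of MiniGPT-4: compare the vocabulary sizes of the
# tokenizer and the language model that demo.py is configured to load.
from transformers import AutoConfig, LlamaTokenizer

ckpt = "/path/to/vicuna-7b"  # hypothetical path; use the llama_model path from your config

tokenizer = LlamaTokenizer.from_pretrained(ckpt)
config = AutoConfig.from_pretrained(ckpt)

sp_size = tokenizer.sp_model.get_piece_size()  # ids the SentencePiece model can decode
tok_size = len(tokenizer)                      # sp_size plus any registered added tokens
lm_size = config.vocab_size                    # ids the language model head can emit

print(f"sentencepiece pieces: {sp_size}, tokenizer total: {tok_size}, model vocab: {lm_size}")

# If the model can emit ids that are neither SentencePiece pieces nor registered
# added tokens, decode() ends up calling sp_model.IdToPiece with an out-of-range id,
# which raises exactly the IndexError shown in the traceback.
if lm_size > tok_size:
    print("Mismatch: the model can generate token ids the tokenizer cannot decode; "
          "the tokenizer files probably do not match the Vicuna weights.")
```

If the sizes disagree, the weights and tokenizer most likely come from different preparations; redoing the Vicuna setup (applying the released delta to the base LLaMA weights and using the tokenizer produced alongside it) should resolve the error.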