failed to load character annoy metadata, generating from scratch... #7

Closed · YenRaven opened this issue on May 23, 2023 · 5 comments

@YenRaven (Owner)
failed to load character annoy metadata, generating from scratch...

Traceback (most recent call last):
File "K:\1ai\oog\oobabooga-windows\installer_files\env\lib\site-packages\gradio\routes.py", line 414, in run_predict
output = await app.get_blocks().process_api(
File "K:\1ai\oog\oobabooga-windows\installer_files\env\lib\site-packages\gradio\blocks.py", line 1323, in process_api
result = await self.call_function(
File "K:\1ai\oog\oobabooga-windows\installer_files\env\lib\site-packages\gradio\blocks.py", line 1067, in call_function
prediction = await utils.async_iteration(iterator)
File "K:\1ai\oog\oobabooga-windows\installer_files\env\lib\site-packages\gradio\utils.py", line 339, in async_iteration
return await iterator.__anext__()
File "K:\1ai\oog\oobabooga-windows\installer_files\env\lib\site-packages\gradio\utils.py", line 332, in __anext__
return await anyio.to_thread.run_sync(
File "K:\1ai\oog\oobabooga-windows\installer_files\env\lib\site-packages\anyio\to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "K:\1ai\oog\oobabooga-windows\installer_files\env\lib\site-packages\anyio_backends_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "K:\1ai\oog\oobabooga-windows\installer_files\env\lib\site-packages\anyio_backends_asyncio.py", line 867, in run
result = context.run(func, *args)
File "K:\1ai\oog\oobabooga-windows\installer_files\env\lib\site-packages\gradio\utils.py", line 315, in run_sync_iterator_async
return next(iterator)
File "K:\1ai\oog\oobabooga-windows\text-generation-webui\modules\chat.py", line 319, in generate_chat_reply_wrapper
for i, history in enumerate(generate_chat_reply(text, shared.history, state, regenerate, _continue, loading_message=True)):
File "K:\1ai\oog\oobabooga-windows\text-generation-webui\modules\chat.py", line 313, in generate_chat_reply
for history in chatbot_wrapper(text, history, state, regenerate=regenerate, _continue=_continue, loading_message=loading_message):
File "K:\1ai\oog\oobabooga-windows\text-generation-webui\modules\chat.py", line 226, in chatbot_wrapper
prompt = apply_extensions('custom_generate_chat_prompt', text, state, **kwargs)
File "K:\1ai\oog\oobabooga-windows\text-generation-webui\modules\extensions.py", line 193, in apply_extensions
return EXTENSION_MAP[typ](*args, **kwargs)
File "K:\1ai\oog\oobabooga-windows\text-generation-webui\modules\extensions.py", line 80, in _apply_custom_generate_chat_prompt
return extension.custom_generate_chat_prompt(text, state, **kwargs)
File "K:\1ai\oog\oobabooga-windows\text-generation-webui\extensions\annoy_ltm\script.py", line 672, in custom_generate_chat_prompt
return generator.custom_generate_chat_prompt(user_input, state, **kwargs)
File "K:\1ai\oog\oobabooga-windows\text-generation-webui\extensions\annoy_ltm\script.py", line 519, in custom_generate_chat_prompt
loaded_annoy_index = AnnoyIndex(shared.model.model.config.hidden_size, 'angular')
AttributeError: 'Llama' object has no attribute 'config'

Originally posted by @emangamer in #2 (comment)
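For context: the failing line assumes a transformers-style model object, whose config carries a hidden_size, but a GGML model served through llama-cpp-python is a llama_cpp.Llama instance with no config attribute at all, as the AttributeError shows. A minimal illustration of the mismatch (the model names and file paths here are hypothetical):

```python
# Hypothetical model names/paths, for illustration of the attribute mismatch only.
from transformers import AutoModelForCausalLM
from llama_cpp import Llama

hf_model = AutoModelForCausalLM.from_pretrained("gpt2")
print(hf_model.config.hidden_size)    # transformers models expose a config object

ggml_model = Llama(model_path="manticore-13b.ggmlv3.q4_0.bin")  # hypothetical file
print(ggml_model.config.hidden_size)  # AttributeError: 'Llama' object has no attribute 'config'
```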

@YenRaven (Owner, Author)

I'll need to find a more broadly supported value to use in place of shared.model.model.config.hidden_size, and/or recreate the settings parameter to give users who run into this issue a workaround; one possible shape for such a helper is sketched below.
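A minimal sketch of that idea, assuming llama-cpp-python's Llama exposes the embedding width via n_embd() and that transformers-style models keep it on model.config; the function name, fallback order, and override parameter are illustrative, not the committed fix:

```python
# Illustrative sketch only: the helper name, fallback order, and the
# override parameter are assumptions, not the fix that actually landed.
def get_hidden_size(model, override=None):
    """Best-effort lookup of the embedding width across model backends."""
    if override is not None:
        return override  # user-supplied value, e.g. from the extension settings
    # Transformers-style models carry the size on their config object.
    config = getattr(model, "config", None)
    if config is not None and hasattr(config, "hidden_size"):
        return config.hidden_size
    # llama-cpp-python's Llama has no .config but reports the width via n_embd().
    if hasattr(model, "n_embd"):
        return model.n_embd()
    raise ValueError("Could not determine the model's hidden size; "
                     "please set a manual override in the extension settings.")
```

With a helper like this, the failing line would become something along the lines of AnnoyIndex(get_hidden_size(shared.model.model), 'angular').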

@emangamer commented on May 23, 2023

Thank you for identifying the issue and making a ticket. I was using https://huggingface.co/TheBloke/Manticore-13B-GGML.

@bbecausereasonss commented on May 24, 2023

Same error in WSL, using Airoboros 13B by TheBloke (4-bit, group size 128).


failed to load character annoy metadata, generating from scratch...
building annoy index took 0.0015120506286621094 seconds...
Output generated in 23.00 seconds (8.65 tokens/s, 199 tokens, context 134, seed 744471981)
Traceback (most recent call last):
File "/home/perplexity/miniconda3/envs/textgen/lib/python3.10/site-packages/gradio/routes.py", line 422, in run_predict
output = await app.get_blocks().process_api(
File "/home/perplexity/miniconda3/envs/textgen/lib/python3.10/site-packages/gradio/blocks.py", line 1323, in process_api
result = await self.call_function(
File "/home/perplexity/miniconda3/envs/textgen/lib/python3.10/site-packages/gradio/blocks.py", line 1067, in call_function
prediction = await utils.async_iteration(iterator)
File "/home/perplexity/miniconda3/envs/textgen/lib/python3.10/site-packages/gradio/utils.py", line 336, in async_iteration
return await iterator.__anext__()
File "/home/perplexity/miniconda3/envs/textgen/lib/python3.10/site-packages/gradio/utils.py", line 329, in __anext__
return await anyio.to_thread.run_sync(
File "/home/perplexity/miniconda3/envs/textgen/lib/python3.10/site-packages/anyio/to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/perplexity/miniconda3/envs/textgen/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "/home/perplexity/miniconda3/envs/textgen/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 867, in run
result = context.run(func, *args)
File "/home/perplexity/miniconda3/envs/textgen/lib/python3.10/site-packages/gradio/utils.py", line 312, in run_sync_iterator_async
return next(iterator)
File "/home/perplexity/text-generation-webui/modules/chat.py", line 327, in generate_chat_reply_wrapper
for i, history in enumerate(generate_chat_reply(text, shared.history, state, regenerate, _continue, loading_message=True)):
File "/home/perplexity/text-generation-webui/modules/chat.py", line 321, in generate_chat_reply
for history in chatbot_wrapper(text, history, state, regenerate=regenerate, _continue=_continue, loading_message=loading_message):
File "/home/perplexity/text-generation-webui/modules/chat.py", line 230, in chatbot_wrapper
prompt = apply_extensions('custom_generate_chat_prompt', text, state, **kwargs)
File "/home/perplexity/text-generation-webui/modules/extensions.py", line 193, in apply_extensions
return EXTENSION_MAP[typ](*args, **kwargs)
File "/home/perplexity/text-generation-webui/modules/extensions.py", line 80, in _apply_custom_generate_chat_prompt
return extension.custom_generate_chat_prompt(text, state, **kwargs)
File "/home/perplexity/text-generation-webui/extensions/annoy_ltm/script.py", line 673, in custom_generate_chat_prompt
return generator.custom_generate_chat_prompt(user_input, state, **kwargs)
File "/home/perplexity/text-generation-webui/extensions/annoy_ltm/script.py", line 531, in custom_generate_chat_prompt
loaded_history_last_index = index_to_history_position[loaded_history_items-1]
KeyError: -1

@YenRaven (Owner, Author)

@bbecausereasonss That looks to be an unrelated issue; I've created a new ticket for it, #11. You may want to subscribe to that issue to be notified when a resolution is merged.
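For reference, the KeyError: -1 above points at an empty metadata load: when loaded_history_items is 0, loaded_history_items - 1 evaluates to -1, which is not a key in the index_to_history_position dict. A guard along these lines would sidestep it (the variable names mirror the traceback; the guard itself is a hypothetical sketch, not necessarily the fix tracked in #11):

```python
# Hypothetical guard around the lookup that raised KeyError: -1 above.
# index_to_history_position is a dict keyed by item position, so a key of -1
# (from loaded_history_items - 1 when nothing was loaded) does not exist.
if loaded_history_items > 0:
    loaded_history_last_index = index_to_history_position[loaded_history_items - 1]
else:
    loaded_history_last_index = 0  # no items loaded yet; start from the beginning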

YenRaven added a commit that referenced this issue on May 25, 2023:
fix(#7): Add a get hidden size helper fuction to better cover GGML models and models that do not supply the hidden size in the config

YenRaven added a commit that referenced this issue on May 25, 2023:
fix(#7): Add a get hidden size helper fuction to better cover GGML models and models that do not supply the hidden size in the config
@YenRaven (Owner, Author)

This issue should be fixed in the main branch.
