This project unlocks the power of different large language models: send the same input prompt to several of them and compare their outputs side by side.
This is an open-source project with support for the following language models:
This repository is intended as a minimal example; a working demo is hosted at Olilo LLM.
from concurrent.futures import ThreadPoolExecutor, as_completed

with ThreadPoolExecutor() as executor:
    # Start a thread for each model, remembering which column its result belongs in
    futures = {executor.submit(model.interact, prompt): i for i, model in enumerate(models)}
    # Retrieve and display the results as they complete
    for done, future in enumerate(as_completed(futures)):
        progress.progress((done + 1) / len(models))
        cols[futures[future]].write(future.result(), unsafe_allow_html=True)
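The models, cols, progress, and prompt objects come from the surrounding Streamlit app. A minimal sketch of that wiring, with names and layout assumed purely for illustration (see app.py for the actual code), might look like this:

import streamlit as st

# Hypothetical wiring; app.py may structure this differently.
models = [ModelOPENAI()]                  # one wrapper instance per supported model
prompt = st.text_input("Enter a prompt")  # the same prompt is sent to every model
cols = st.columns(len(models))            # one output column per model
progress = st.progress(0)                 # updated as results come back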
from abc import ABC, abstractmethod

class ModelInterface(ABC):
    """Common interface that every model wrapper must implement."""

    @abstractmethod
    def interact(self, prompt):
        """Send the prompt to the model and return its (HTML) response."""
        pass

    @abstractmethod
    def getName(self):
        """Return a short display name for the model."""
        pass
import openai

class ModelOPENAI(ModelInterface):
    def interact(self, prompt):
        print('Loading ChatGPT...')
        try:
            response = openai.ChatCompletion.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": pre_prompt},
                    {"role": "user", "content": prompt}
                ]
            )
            print("ChatGPT:", response["choices"][0]["message"]["content"])
            return "<h3>ChatGPT4</h3>" + response["choices"][0]["message"]["content"]
        except KeyError:
            return "<h3>ChatGPT4</h3>" + "No result"
        except Exception as e:
            return f"An error occurred: {e}"

    def getName(self):
        return "CHATGPT4"
git clone git@github.com:oliloai/llms.git
cd llms
cp .env.example .env
nano .env
pip install -r requirements.txt
streamlit run app.py
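The .env file presumably holds the provider API keys the app reads at startup. As a rough sketch, assuming python-dotenv and a variable name such as OPENAI_API_KEY (check .env.example for the names this repository actually uses), loading them could look like this:

import os
import openai
from dotenv import load_dotenv

load_dotenv()  # read the variables from .env into the environment
openai.api_key = os.environ["OPENAI_API_KEY"]  # hypothetical variable name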
Alternatively, to keep dependencies isolated, create and activate a virtual environment before installing:
python -m venv myenv
source myenv/bin/activate
pip install -r requirements.txt
Need help?
Email us at hello@olilo.ai.