Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 2 additions & 4 deletions docs/VALIDATOR.md
Original file line number Diff line number Diff line change
Expand Up @@ -104,9 +104,9 @@ Using Together AI and Open AI simplifies setup and reduces local resource requir

3. **Set Up the `.env` File**
```bash
echo "TOGETHERAI_API_KEY=your_together_ai_api_key" > .env
echo "OPENAI_API_KEY=your_openai_api_key" >> .env
echo "HF_TOKEN=your_hugging_face_token" >> .env (needed for some vLLM model)
echo "HF_TOKEN=your_hugging_face_token" >> .env (needed for some datasets)
echo "WANDB_API_KEY=your_wandb_api_key" >> .env
echo "USE_TORCH=1" >> .env
```

Expand All @@ -130,8 +130,6 @@ Using Together AI and Open AI simplifies setup and reduces local resource requir
--wallet.name "your-wallet-name" \
--wallet.hotkey "your-hotkey-name" \
--subtensor.network finney \
--llm_client.base_urls "http://localhost:8000/v1,https://api.openai.com/v1,https://api.together.xyz/v1" \
--llm_client.models "Qwen/Qwen2.5-7B-Instruct,gpt-4o-mini,meta-llama/Llama-3.3-70B-Instruct-Turbo" \
--neuron_type validator \
--logging.debug
```
Expand Down
11 changes: 11 additions & 0 deletions install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,15 @@ pip uninstall uvloop -y
echo "Installing mathgenerator..."
pip install git+https://github.com/lukew3/mathgenerator.git

# Add USE_TORCH=1 to the .env file
echo "USE_TORCH=1" >> .env

# check if use_torch is set
if grep -q "USE_TORCH=1" .env; then
echo "Successfully set USE_TORCH=1"
else
echo "Failed to set USE_TORCH=1"
echo "Please set USE_TORCH=1 manually in the .env file"
fi

echo "Setup complete!"
6 changes: 4 additions & 2 deletions logicnet/utils/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,14 +172,16 @@ def add_args(cls, parser):
"--llm_client.base_urls",
type=str,
help="The base url for the LLM client",
default="http://localhost:8000/v1,https://api.openai.com/v1,https://api.together.xyz/v1",
# default="http://localhost:8000/v1,https://api.openai.com/v1,https://api.together.xyz/v1",
default="null,https://api.openai.com/v1,null",
)

parser.add_argument(
"--llm_client.models",
type=str,
help="The model for the LLM client",
default="Qwen/Qwen2.5-7B-Instruct,gpt-4o-mini,meta-llama/Llama-3.3-70B-Instruct-Turbo",
# default="Qwen/Qwen2.5-7B-Instruct,gpt-4o-mini,meta-llama/Llama-3.3-70B-Instruct-Turbo",
default="null,gpt-4o,null",
)

parser.add_argument(
Expand Down
39 changes: 28 additions & 11 deletions neurons/validator/validator.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,21 +57,38 @@ def __init__(self, config=None):

base_urls = self.config.llm_client.base_urls.split(",")
models = self.config.llm_client.models.split(",")

# Ensure the lists have enough elements
if len(base_urls) < 3 or len(models) < 3:
bt.logging.warning("base_urls or models configuration is incomplete. Please ensure they have just 3 entries.")
raise ValueError("base_urls or models configuration is incomplete. Please ensure they have just 3 entries.")
# if len(base_urls) < 3 or len(models) < 3:
# bt.logging.warning("base_urls or models configuration is incomplete. Please ensure they have just 3 entries.")
# raise ValueError("base_urls or models configuration is incomplete. Please ensure they have just 3 entries.")

if len(base_urls) < 1 or len(models) < 1:
bt.logging.warning(
"base_urls or models configuration is incomplete. Please ensure they have at least 1 entry."
)
raise ValueError(
"base_urls or models configuration is incomplete. Please ensure they have at least 1 entry."
)

self.model_rotation_pool = {
"vllm": [base_urls[0].strip(), "xyz", models[0]],
"openai": [base_urls[1].strip(), openai_key, models[1]],
"togetherai": [base_urls[2].strip(), togetherai_key, models[2]],
# "vllm": [base_urls[0].strip(), "xyz", models[0]],
# "openai": [base_urls[1].strip(), openai_key, models[1]],
# "togetherai": [base_urls[2].strip(), togetherai_key, models[2]],
"openai": [base_urls[1].strip(), openai_key, 'gpt-4o'],
}
for key, value in self.model_rotation_pool.items():
if value[2] in model_blacklist:
bt.logging.warning(f"Model {value[2]} is blacklisted. Please use another model.")
self.model_rotation_pool[key] = "no use"
# for key, value in self.model_rotation_pool.items():
# if value[2] in model_blacklist:
# bt.logging.warning(f"Model {value[2]} is blacklisted. Please use another model.")
# self.model_rotation_pool[key] = "no use"

# Immediately blacklist if it's not "gpt-4o" and force it to be "gpt-4o"
if self.model_rotation_pool["openai"][2] != "gpt-4o":
bt.logging.warning(
f"Model must be gpt-4o. Found {self.model_rotation_pool['openai'][2]} instead."
)
bt.logging.info("Setting OpenAI model to gpt-4o.")
self.model_rotation_pool["openai"][2] = "gpt-4o"

# Check if 'null' is at the same index in both CLI lists
for i in range(3):
Expand Down