diff --git a/docs/VALIDATOR.md b/docs/VALIDATOR.md index a0579ee8..1ebb9be0 100644 --- a/docs/VALIDATOR.md +++ b/docs/VALIDATOR.md @@ -104,9 +104,9 @@ Using Together AI and Open AI simplifies setup and reduces local resource requir 3. **Set Up the `.env` File** ```bash - echo "TOGETHERAI_API_KEY=your_together_ai_api_key" > .env echo "OPENAI_API_KEY=your_openai_api_key" >> .env - echo "HF_TOKEN=your_hugging_face_token" >> .env (needed for some vLLM model) + echo "HF_TOKEN=your_hugging_face_token" >> .env (needed for some datasets) + echo "WANDB_API_KEY=your_wandb_api_key" >> .env echo "USE_TORCH=1" >> .env ``` @@ -130,8 +130,6 @@ Using Together AI and Open AI simplifies setup and reduces local resource requir --wallet.name "your-wallet-name" \ --wallet.hotkey "your-hotkey-name" \ --subtensor.network finney \ - --llm_client.base_urls "http://localhost:8000/v1,https://api.openai.com/v1,https://api.together.xyz/v1" \ - --llm_client.models "Qwen/Qwen2.5-7B-Instruct,gpt-4o-mini,meta-llama/Llama-3.3-70B-Instruct-Turbo" \ --neuron_type validator \ --logging.debug ``` diff --git a/install.sh b/install.sh index d2f7c048..f84b60bc 100644 --- a/install.sh +++ b/install.sh @@ -15,4 +15,15 @@ pip uninstall uvloop -y echo "Installing mathgenerator..." pip install git+https://github.com/lukew3/mathgenerator.git +# add use torch to env echo "USE_TORCH=1" >> .env +echo "USE_TORCH=1" >> .env + +# check if use_torch is set +if grep -q "USE_TORCH=1" .env; then + echo "Successfully set USE_TORCH=1" +else + echo "Failed to set USE_TORCH=1" + echo "Please set USE_TORCH=1 manually in the .env file" +fi + echo "Setup complete!" 
diff --git a/logicnet/utils/config.py b/logicnet/utils/config.py index ae22ab68..871a6e3b 100644 --- a/logicnet/utils/config.py +++ b/logicnet/utils/config.py @@ -172,14 +172,16 @@ def add_args(cls, parser): "--llm_client.base_urls", type=str, help="The base url for the LLM client", - default="http://localhost:8000/v1,https://api.openai.com/v1,https://api.together.xyz/v1", + # default="http://localhost:8000/v1,https://api.openai.com/v1,https://api.together.xyz/v1", + default="null,https://api.openai.com/v1,null", ) parser.add_argument( "--llm_client.models", type=str, help="The model for the LLM client", - default="Qwen/Qwen2.5-7B-Instruct,gpt-4o-mini,meta-llama/Llama-3.3-70B-Instruct-Turbo", + # default="Qwen/Qwen2.5-7B-Instruct,gpt-4o-mini,meta-llama/Llama-3.3-70B-Instruct-Turbo", + default="null,gpt-4o,null", ) parser.add_argument( diff --git a/neurons/validator/validator.py b/neurons/validator/validator.py index 4ff96d46..ec45d7ba 100644 --- a/neurons/validator/validator.py +++ b/neurons/validator/validator.py @@ -57,21 +57,38 @@ def __init__(self, config=None): base_urls = self.config.llm_client.base_urls.split(",") models = self.config.llm_client.models.split(",") - + # Ensure the lists have enough elements - if len(base_urls) < 3 or len(models) < 3: - bt.logging.warning("base_urls or models configuration is incomplete. Please ensure they have just 3 entries.") - raise ValueError("base_urls or models configuration is incomplete. Please ensure they have just 3 entries.") + # if len(base_urls) < 3 or len(models) < 3: + # bt.logging.warning("base_urls or models configuration is incomplete. Please ensure they have just 3 entries.") + # raise ValueError("base_urls or models configuration is incomplete. Please ensure they have just 3 entries.") + + if len(base_urls) < 3 or len(models) < 3: + bt.logging.warning( + "base_urls or models configuration is incomplete. Please ensure they have at least 3 entries."
 + ) + raise ValueError( + "base_urls or models configuration is incomplete. Please ensure they have at least 3 entries." + ) self.model_rotation_pool = { - "vllm": [base_urls[0].strip(), "xyz", models[0]], - "openai": [base_urls[1].strip(), openai_key, models[1]], - "togetherai": [base_urls[2].strip(), togetherai_key, models[2]], + # "vllm": [base_urls[0].strip(), "xyz", models[0]], + # "openai": [base_urls[1].strip(), openai_key, models[1]], + # "togetherai": [base_urls[2].strip(), togetherai_key, models[2]], + "openai": [base_urls[1].strip(), openai_key, 'gpt-4o'], } - for key, value in self.model_rotation_pool.items(): - if value[2] in model_blacklist: - bt.logging.warning(f"Model {value[2]} is blacklisted. Please use another model.") - self.model_rotation_pool[key] = "no use" + # for key, value in self.model_rotation_pool.items(): + # if value[2] in model_blacklist: + # bt.logging.warning(f"Model {value[2]} is blacklisted. Please use another model.") + # self.model_rotation_pool[key] = "no use" + + # Immediately blacklist if it's not "gpt-4o" and force it to be "gpt-4o" + if self.model_rotation_pool["openai"][2] != "gpt-4o": + bt.logging.warning( + f"Model must be gpt-4o. Found {self.model_rotation_pool['openai'][2]} instead." + ) + bt.logging.info("Setting OpenAI model to gpt-4o.") + self.model_rotation_pool["openai"][2] = "gpt-4o" # Check if 'null' is at the same index in both CLI lists for i in range(3):