diff --git a/README.md b/README.md
index ee12ee12..c2b7f6d2 100644
--- a/README.md
+++ b/README.md
@@ -61,7 +61,7 @@ git clone https://github.com/GradientHQ/parallax.git
 cd parallax
 
 # Enter Python virtual environment
-python -m venv ./venv
+python3 -m venv ./venv
 source ./venv/bin/activate
 
 pip install -e '.[mac]'
@@ -79,6 +79,14 @@ pip install -e '.[dev]'
 After installing .exe, right click Windows start button and click ```Windows Terminal(Admin)``` to start a Powershell console as administrator.
 
+❗ Make sure you open your terminal with administrator privileges.
+#### Ways to run Windows Terminal as administrator
+
+- Start menu: Right‑click Start and choose “Windows Terminal (Admin)”, or search “Windows Terminal”, right‑click the result, and select “Run as administrator”.
+- Run dialog: Press Win+R → type `wt` → press Ctrl+Shift+Enter.
+- Task Manager: Press Ctrl+Shift+Esc → File → Run new task → enter `wt` → check “Create this task with administrator privileges”.
+- File Explorer: Open the target folder → hold Ctrl+Shift → right‑click in the folder → select “Open in Terminal”.
+
 Start Windows dependencies installation by simply typing this command in console:
 ```sh
 parallax install
 ```
diff --git a/src/backend/server/static_config.py b/src/backend/server/static_config.py
index 73f2fb83..05a8c177 100644
--- a/src/backend/server/static_config.py
+++ b/src/backend/server/static_config.py
@@ -10,21 +10,38 @@
     "openai/gpt-oss-120b",
     "moonshotai/Kimi-K2-Instruct",
     "moonshotai/Kimi-K2-Instruct-0905",
+    "Qwen/Qwen3-Next-80B-A3B-Instruct",
     "Qwen/Qwen3-Next-80B-A3B-Instruct-FP8",
+    "Qwen/Qwen3-Next-80B-A3B-Thinking",
     "Qwen/Qwen3-Next-80B-A3B-Thinking-FP8",
-    # "Qwen/Qwen3-8B",
-    # "Qwen/Qwen3-8B-FP8",
+    "Qwen/Qwen3-0.6B",
+    "Qwen/Qwen3-0.6B-FP8",
+    "Qwen/Qwen3-1.7B",
+    "Qwen/Qwen3-1.7B-FP8",
+    "Qwen/Qwen3-4B",
+    "Qwen/Qwen3-4B-FP8",
+    "Qwen/Qwen3-4B-Instruct-2507",
+    "Qwen/Qwen3-4B-Instruct-2507-FP8",
+    "Qwen/Qwen3-4B-Thinking-2507",
+    "Qwen/Qwen3-4B-Thinking-2507-FP8",
+    "Qwen/Qwen3-8B",
+    "Qwen/Qwen3-8B-FP8",
+    "Qwen/Qwen3-14B",
+    "Qwen/Qwen3-14B-FP8",
     "Qwen/Qwen3-32B",
     "Qwen/Qwen3-32B-FP8",
-    # "Qwen/Qwen3-30B-A3B",
-    # "Qwen/Qwen3-30B-A3B-Instruct-2507-FP8",
-    # "Qwen/Qwen3-30B-A3B-Thinking-2507-FP8",
+    "Qwen/Qwen3-30B-A3B",
+    "Qwen/Qwen3-30B-A3B-Instruct-2507-FP8",
+    "Qwen/Qwen3-30B-A3B-Thinking-2507-FP8",
     "Qwen/Qwen3-235B-A22B-Instruct-2507-FP8",
     "Qwen/Qwen3-235B-A22B-Thinking-2507-FP8",
     "Qwen/Qwen3-235B-A22B-GPTQ-Int4",
-    # "Qwen/Qwen2.5-3B-Instruct",
-    # "Qwen/Qwen2.5-7B-Instruct",
-    # "Qwen/Qwen2.5-14B-Instruct",
+    "Qwen/Qwen2.5-0.5B-Instruct",
+    "Qwen/Qwen2.5-1.5B-Instruct",
+    "Qwen/Qwen2.5-3B-Instruct",
+    "Qwen/Qwen2.5-7B-Instruct",
+    "Qwen/Qwen2.5-14B-Instruct",
+    "Qwen/Qwen2.5-32B-Instruct",
     "Qwen/Qwen2.5-72B-Instruct",
     "nvidia/Llama-3.3-70B-Instruct-FP8",
     "nvidia/Llama-3.1-70B-Instruct-FP8",
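For orientation, below is a minimal sketch of how a supported-models list like the one expanded in `src/backend/server/static_config.py` might be consumed, e.g. to validate a requested model name before loading. The list name `SUPPORTED_MODELS` and the `validate_model_name` helper are illustrative assumptions rather than identifiers taken from the repository, and only a few of the model IDs touched by this diff are reproduced.

```python
# Hypothetical illustration only: the names below are assumptions for this sketch,
# not the actual API of src/backend/server/static_config.py.

# A small subset of the model IDs added or uncommented in this diff.
SUPPORTED_MODELS = [
    "Qwen/Qwen3-0.6B",
    "Qwen/Qwen3-8B-FP8",
    "Qwen/Qwen3-30B-A3B",
    "Qwen/Qwen2.5-7B-Instruct",
]


def validate_model_name(name: str) -> str:
    """Return the name unchanged if it is in the supported list, else raise ValueError."""
    if name not in SUPPORTED_MODELS:
        raise ValueError(
            f"Unsupported model {name!r}; choose one of: {', '.join(SUPPORTED_MODELS)}"
        )
    return name


if __name__ == "__main__":
    # Succeeds for a listed model, raises for anything not in the list.
    print(validate_model_name("Qwen/Qwen3-8B-FP8"))
```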