From 40b1c6f943c933407d2187bf0c90bb8cab7fb576 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 08:11:24 +0000 Subject: [PATCH 01/16] fix(schema): serialize ToolCallID and Reasoning in Messages.ToProto The ToProto conversion was dropping tool_call_id and reasoning_content even though both proto and Go fields existed, breaking multi-turn tool calling and reasoning passthrough to backends. --- core/schema/message.go | 8 ++++++-- core/schema/message_test.go | 18 ++++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/core/schema/message.go b/core/schema/message.go index 79a30352e93e..24407165ec06 100644 --- a/core/schema/message.go +++ b/core/schema/message.go @@ -83,8 +83,12 @@ func (messages Messages) ToProto() []*proto.Message { } } - // Note: tool_call_id is not in schema.Message yet - // Reasoning field is now available in schema.Message but not yet in proto.Message + if message.ToolCallID != "" { + protoMessages[i].ToolCallId = message.ToolCallID + } + if message.Reasoning != nil { + protoMessages[i].ReasoningContent = *message.Reasoning + } } return protoMessages } diff --git a/core/schema/message_test.go b/core/schema/message_test.go index cd6f514e2518..8ebf3fa05184 100644 --- a/core/schema/message_test.go +++ b/core/schema/message_test.go @@ -237,6 +237,24 @@ var _ = Describe("LLM tests", func() { Expect(protoMessages[0].Content).To(Equal("")) }) + It("should serialize ToolCallID and Reasoning fields", func() { + reasoning := "thinking..." + messages := Messages{ + { + Role: "tool", + Content: "result", + ToolCallID: "call_123", + Reasoning: &reasoning, + }, + } + + protoMessages := messages.ToProto() + + Expect(protoMessages).To(HaveLen(1)) + Expect(protoMessages[0].ToolCallId).To(Equal("call_123")) + Expect(protoMessages[0].ReasoningContent).To(Equal("thinking...")) + }) + It("should handle message with array content containing non-text parts", func() { messages := Messages{ { From a30719f04a601f967c6fa6e60dbbe1ce041100b7 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 08:11:38 +0000 Subject: [PATCH 02/16] refactor(config): introduce backend hook system and migrate llama-cpp defaults Adds RegisterBackendHook/runBackendHooks so each backend can register default-filling functions that run during ModelConfig.SetDefaults(). Migrates the existing GGUF guessing logic into hooks_llamacpp.go, registered for both 'llama-cpp' and the empty backend (auto-detect). Removes the old guesser.go shim. --- core/config/backend_hooks.go | 30 +++++++++++++++++++++++ core/config/guesser.go | 46 ----------------------------------- core/config/hooks_llamacpp.go | 46 +++++++++++++++++++++++++++++++++++ core/config/model_config.go | 7 +++++- 4 files changed, 82 insertions(+), 47 deletions(-) create mode 100644 core/config/backend_hooks.go delete mode 100644 core/config/guesser.go create mode 100644 core/config/hooks_llamacpp.go diff --git a/core/config/backend_hooks.go b/core/config/backend_hooks.go new file mode 100644 index 000000000000..8b2403cbbef9 --- /dev/null +++ b/core/config/backend_hooks.go @@ -0,0 +1,30 @@ +package config + +// BackendDefaultsHook is called during Prepare() and can modify cfg. +// Only fills in values that are not already set by the user. +type BackendDefaultsHook func(cfg *ModelConfig, modelPath string) + +var backendHooks = map[string][]BackendDefaultsHook{} + +// RegisterBackendHook registers a hook for a backend name. 
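+// Hooks are typically registered from an init() function in a per-backend
+// file (see hooks_llamacpp.go and hooks_vllm.go).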
+// Special keys: +// - "*" = global catch-all, runs for EVERY backend (before specific hooks) +// - "" = runs only when cfg.Backend is empty (auto-detect case) +// - "vllm", "llama-cpp" etc. = runs only for that specific backend +// +// Multiple hooks per key are supported; they run in registration order. +func RegisterBackendHook(backend string, hook BackendDefaultsHook) { + backendHooks[backend] = append(backendHooks[backend], hook) +} + +// runBackendHooks executes hooks in order: +// 1. "*" (global) hooks for every backend +// 2. Backend-specific hooks for cfg.Backend (includes "" when backend is empty) +func runBackendHooks(cfg *ModelConfig, modelPath string) { + for _, h := range backendHooks["*"] { + h(cfg, modelPath) + } + for _, h := range backendHooks[cfg.Backend] { + h(cfg, modelPath) + } +} diff --git a/core/config/guesser.go b/core/config/guesser.go deleted file mode 100644 index e4ca5b1415f9..000000000000 --- a/core/config/guesser.go +++ /dev/null @@ -1,46 +0,0 @@ -package config - -import ( - "os" - "path/filepath" - - gguf "github.com/gpustack/gguf-parser-go" - "github.com/mudler/xlog" -) - -func guessDefaultsFromFile(cfg *ModelConfig, modelPath string, defaultCtx int) { - if os.Getenv("LOCALAI_DISABLE_GUESSING") == "true" { - xlog.Debug("guessDefaultsFromFile: guessing disabled with LOCALAI_DISABLE_GUESSING") - return - } - - if modelPath == "" { - xlog.Debug("guessDefaultsFromFile: modelPath is empty") - return - } - - // We try to guess only if we don't have a template defined already - guessPath := filepath.Join(modelPath, cfg.ModelFileName()) - - defer func() { - if r := recover(); r != nil { - xlog.Error("guessDefaultsFromFile: panic while parsing gguf file") - } - }() - - defer func() { - if cfg.ContextSize == nil { - if defaultCtx == 0 { - defaultCtx = defaultContextSize - } - cfg.ContextSize = &defaultCtx - } - }() - - // try to parse the gguf file - f, err := gguf.ParseGGUFFile(guessPath) - if err == nil { - guessGGUFFromFile(cfg, f, defaultCtx) - return - } -} diff --git a/core/config/hooks_llamacpp.go b/core/config/hooks_llamacpp.go new file mode 100644 index 000000000000..7c2640cee39a --- /dev/null +++ b/core/config/hooks_llamacpp.go @@ -0,0 +1,46 @@ +package config + +import ( + "os" + "path/filepath" + + gguf "github.com/gpustack/gguf-parser-go" + "github.com/mudler/xlog" +) + +func init() { + // Register for both explicit llama-cpp and empty backend (auto-detect from GGUF file) + RegisterBackendHook("llama-cpp", llamaCppDefaults) + RegisterBackendHook("", llamaCppDefaults) +} + +func llamaCppDefaults(cfg *ModelConfig, modelPath string) { + if os.Getenv("LOCALAI_DISABLE_GUESSING") == "true" { + xlog.Debug("llamaCppDefaults: guessing disabled") + return + } + if modelPath == "" { + return + } + + guessPath := filepath.Join(modelPath, cfg.ModelFileName()) + + defer func() { + if r := recover(); r != nil { + xlog.Error("llamaCppDefaults: panic while parsing gguf file") + } + }() + + // Default context size if not set, regardless of whether GGUF parsing succeeds + defer func() { + if cfg.ContextSize == nil { + ctx := defaultContextSize + cfg.ContextSize = &ctx + } + }() + + f, err := gguf.ParseGGUFFile(guessPath) + if err == nil { + guessGGUFFromFile(cfg, f, 0) + } +} diff --git a/core/config/model_config.go b/core/config/model_config.go index 5f1780b7650d..4185d4f3ff9c 100644 --- a/core/config/model_config.go +++ b/core/config/model_config.go @@ -497,7 +497,12 @@ func (cfg *ModelConfig) SetDefaults(opts ...ConfigLoaderOption) { cfg.Debug = &trueV } - 
guessDefaultsFromFile(cfg, lo.modelPath, ctx)
+	// If a context size was provided via LoadOptions, apply it before hooks so they
+	// don't override it with their own defaults.
+	if ctx != 0 && cfg.ContextSize == nil {
+		cfg.ContextSize = &ctx
+	}
+	runBackendHooks(cfg, lo.modelPath)
 
 	cfg.syncKnownUsecasesFromString()
 }
 

From 6cf8263c3073990a936d1ba1266ee86d27cbd033 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto 
Date: Sun, 12 Apr 2026 08:11:46 +0000
Subject: [PATCH 03/16] feat(config): add vLLM parser defaults hook and
 importer auto-detection

Introduces parser_defaults.json mapping model families to vLLM
tool_parser/reasoning_parser names, with longest-pattern-first matching.
The vllmDefaults hook auto-fills tool_parser and reasoning_parser options
at load time for known families, while the VLLMImporter writes the same
values into generated YAML so users can review and edit them.

Adds tests covering MatchParserDefaults, hook registration via
SetDefaults, and the user-override behavior.
---
 core/config/hooks_test.go        | 114 +++++++++++++++++++++++++++++++
 core/config/hooks_vllm.go        |  85 +++++++++++++++++++++++
 core/config/parser_defaults.json |  33 +++++++++
 core/gallery/importers/vllm.go   |  12 ++++
 4 files changed, 244 insertions(+)
 create mode 100644 core/config/hooks_test.go
 create mode 100644 core/config/hooks_vllm.go
 create mode 100644 core/config/parser_defaults.json

diff --git a/core/config/hooks_test.go b/core/config/hooks_test.go
new file mode 100644
index 000000000000..b97077564e96
--- /dev/null
+++ b/core/config/hooks_test.go
@@ -0,0 +1,114 @@
+package config_test
+
+import (
+	. "github.com/mudler/LocalAI/core/config"
+	"github.com/mudler/LocalAI/core/schema"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Backend hooks and parser defaults", func() {
+	Context("MatchParserDefaults", func() {
+		It("matches Qwen3 family", func() {
+			parsers := MatchParserDefaults("Qwen/Qwen3-8B")
+			Expect(parsers).NotTo(BeNil())
+			Expect(parsers["tool_parser"]).To(Equal("hermes"))
+			Expect(parsers["reasoning_parser"]).To(Equal("qwen3"))
+		})
+
+		It("matches Qwen3.5 with longest-prefix-first", func() {
+			parsers := MatchParserDefaults("Qwen/Qwen3.5-9B")
+			Expect(parsers).NotTo(BeNil())
+			Expect(parsers["tool_parser"]).To(Equal("qwen3_xml"))
+		})
+
+		It("matches Llama-3.3 not Llama-3.2", func() {
+			parsers := MatchParserDefaults("meta/Llama-3.3-70B-Instruct")
+			Expect(parsers).NotTo(BeNil())
+			Expect(parsers["tool_parser"]).To(Equal("llama3_json"))
+		})
+
+		It("matches deepseek-r1", func() {
+			parsers := MatchParserDefaults("deepseek-ai/DeepSeek-R1")
+			Expect(parsers).NotTo(BeNil())
+			Expect(parsers["reasoning_parser"]).To(Equal("deepseek_r1"))
+			Expect(parsers["tool_parser"]).To(Equal("deepseek_v3"))
+		})
+
+		It("returns nil for unknown families", func() {
+			Expect(MatchParserDefaults("acme/unknown-model-xyz")).To(BeNil())
+		})
+	})
+
+	Context("Backend hook registration and execution", func() {
+		It("runs registered hook for a backend", func() {
+			called := false
+			RegisterBackendHook("test-backend-hook", func(cfg *ModelConfig, modelPath string) {
+				called = true
+				cfg.Description = "modified-by-hook"
+			})
+
+			cfg := &ModelConfig{
+				Backend: "test-backend-hook",
+			}
+			// runBackendHooks is internal and the public Prepare path is heavy to set up,
+			// so register a hook for a synthetic backend and exercise it through
+			// SetDefaults on a fresh ModelConfig with no model path.
+			cfg.PredictionOptions = schema.PredictionOptions{}
+
+			// SetDefaults with empty options runs the registered backend hooks internally.
+			cfg.SetDefaults()
+			Expect(called).To(BeTrue())
+			Expect(cfg.Description).To(Equal("modified-by-hook"))
+		})
+	})
+
+	Context("vllmDefaults hook", func() {
+		It("auto-sets parsers for known model families on vllm backend", func() {
+			cfg := &ModelConfig{
+				Backend: "vllm",
+				PredictionOptions: schema.PredictionOptions{
+					BasicModelRequest: schema.BasicModelRequest{
+						Model: "Qwen/Qwen3-8B",
+					},
+				},
+			}
+			cfg.SetDefaults()
+
+			foundTool := false
+			foundReasoning := false
+			for _, opt := range cfg.Options {
+				if opt == "tool_parser:hermes" {
+					foundTool = true
+				}
+				if opt == "reasoning_parser:qwen3" {
+					foundReasoning = true
+				}
+			}
+			Expect(foundTool).To(BeTrue())
+			Expect(foundReasoning).To(BeTrue())
+		})
+
+		It("does not override user-set tool_parser", func() {
+			cfg := &ModelConfig{
+				Backend: "vllm",
+				Options: []string{"tool_parser:custom"},
+				PredictionOptions: schema.PredictionOptions{
+					BasicModelRequest: schema.BasicModelRequest{
+						Model: "Qwen/Qwen3-8B",
+					},
+				},
+			}
+			cfg.SetDefaults()
+
+			count := 0
+			for _, opt := range cfg.Options {
+				if len(opt) >= len("tool_parser:") && opt[:len("tool_parser:")] == "tool_parser:" {
+					count++
+				}
+			}
+			Expect(count).To(Equal(1))
+		})
+	})
+})
diff --git a/core/config/hooks_vllm.go b/core/config/hooks_vllm.go
new file mode 100644
index 000000000000..3f7abd9b393a
--- /dev/null
+++ b/core/config/hooks_vllm.go
@@ -0,0 +1,85 @@
+package config
+
+import (
+	_ "embed"
+	"encoding/json"
+	"strings"
+
+	"github.com/mudler/xlog"
+)
+
+//go:embed parser_defaults.json
+var parserDefaultsJSON []byte
+
+type parserDefaultsData struct {
+	Families map[string]map[string]string `json:"families"`
+	Patterns []string                     `json:"patterns"`
+}
+
+var parsersData *parserDefaultsData
+
+func init() {
+	parsersData = &parserDefaultsData{}
+	if err := json.Unmarshal(parserDefaultsJSON, parsersData); err != nil {
+		xlog.Warn("failed to parse parser_defaults.json", "error", err)
+	}
+
+	RegisterBackendHook("vllm", vllmDefaults)
+	RegisterBackendHook("vllm-omni", vllmDefaults)
+}
+
+// MatchParserDefaults returns parser defaults for the best-matching model family.
+// Returns nil if no family matches. Used both at load time (via hook) and at import time.
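+// For example, "Qwen/Qwen3-8B" matches the "qwen3" pattern and returns
+// {"tool_parser": "hermes", "reasoning_parser": "qwen3"}.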
+func MatchParserDefaults(modelID string) map[string]string { + if parsersData == nil || len(parsersData.Patterns) == 0 { + return nil + } + normalized := normalizeModelID(modelID) + for _, pattern := range parsersData.Patterns { + if strings.Contains(normalized, pattern) { + if family, ok := parsersData.Families[pattern]; ok { + return family + } + } + } + return nil +} + +func vllmDefaults(cfg *ModelConfig, modelPath string) { + // Check if user already set tool_parser or reasoning_parser in Options + hasToolParser := false + hasReasoningParser := false + for _, opt := range cfg.Options { + if strings.HasPrefix(opt, "tool_parser:") { + hasToolParser = true + } + if strings.HasPrefix(opt, "reasoning_parser:") { + hasReasoningParser = true + } + } + if hasToolParser && hasReasoningParser { + return + } + + // Try matching against Model field, then Name + parsers := MatchParserDefaults(cfg.Model) + if parsers == nil { + parsers = MatchParserDefaults(cfg.Name) + } + if parsers == nil { + return + } + + if !hasToolParser { + if tp, ok := parsers["tool_parser"]; ok { + cfg.Options = append(cfg.Options, "tool_parser:"+tp) + xlog.Debug("[parser_defaults] auto-set tool_parser", "parser", tp, "model", cfg.Model) + } + } + if !hasReasoningParser { + if rp, ok := parsers["reasoning_parser"]; ok { + cfg.Options = append(cfg.Options, "reasoning_parser:"+rp) + xlog.Debug("[parser_defaults] auto-set reasoning_parser", "parser", rp, "model", cfg.Model) + } + } +} diff --git a/core/config/parser_defaults.json b/core/config/parser_defaults.json new file mode 100644 index 000000000000..614e6df1ee6d --- /dev/null +++ b/core/config/parser_defaults.json @@ -0,0 +1,33 @@ +{ + "families": { + "qwen3.5": {"tool_parser": "qwen3_xml", "reasoning_parser": "qwen3"}, + "qwen3-coder": {"tool_parser": "qwen3_xml", "reasoning_parser": "qwen3"}, + "qwen3": {"tool_parser": "hermes", "reasoning_parser": "qwen3"}, + "qwen2.5": {"tool_parser": "hermes"}, + "qwq": {"reasoning_parser": "deepseek_r1"}, + "llama-4": {"tool_parser": "llama4_pythonic"}, + "llama-3.3": {"tool_parser": "llama3_json"}, + "llama-3.2": {"tool_parser": "llama3_json"}, + "llama-3.1": {"tool_parser": "llama3_json"}, + "mistral-nemo": {"tool_parser": "mistral", "reasoning_parser": "mistral"}, + "mistral-small": {"tool_parser": "mistral", "reasoning_parser": "mistral"}, + "mistral-large": {"tool_parser": "mistral", "reasoning_parser": "mistral"}, + "magistral": {"tool_parser": "mistral", "reasoning_parser": "mistral"}, + "deepseek-r1": {"tool_parser": "deepseek_v3", "reasoning_parser": "deepseek_r1"}, + "deepseek-v3": {"tool_parser": "deepseek_v3", "reasoning_parser": "deepseek_v3"}, + "glm-5": {"tool_parser": "glm47"}, + "glm-4": {"tool_parser": "glm45", "reasoning_parser": "glm45"}, + "gemma-4": {"tool_parser": "gemma4", "reasoning_parser": "gemma4"}, + "granite-4": {"tool_parser": "granite4", "reasoning_parser": "granite"}, + "minimax-m2.5": {"tool_parser": "minimax_m2", "reasoning_parser": "minimax_m2"}, + "minimax": {"tool_parser": "minimax_m2", "reasoning_parser": "minimax_m2"}, + "kimi-k2": {"tool_parser": "kimi_k2", "reasoning_parser": "kimi_k2"}, + "nemotron": {"reasoning_parser": "nemotron_v3"}, + "olmo": {"tool_parser": "olmo3", "reasoning_parser": "olmo3"}, + "ernie": {"tool_parser": "ernie45", "reasoning_parser": "ernie45"}, + "phi-4": {"tool_parser": "phi4_mini_json"}, + "gpt-oss": {"tool_parser": "openai", "reasoning_parser": "openai_gptoss"}, + "hermes": {"tool_parser": "hermes"} + }, + "patterns": 
["qwen3.5","qwen3-coder","qwen3","qwen2.5","qwq","llama-4","llama-3.3","llama-3.2","llama-3.1","mistral-nemo","mistral-small","mistral-large","magistral","deepseek-r1","deepseek-v3","glm-5","glm-4","gemma-4","granite-4","minimax-m2.5","minimax","kimi-k2","nemotron","olmo","ernie","phi-4","gpt-oss","hermes"] +} diff --git a/core/gallery/importers/vllm.go b/core/gallery/importers/vllm.go index 88baef1fefa8..886405169308 100644 --- a/core/gallery/importers/vllm.go +++ b/core/gallery/importers/vllm.go @@ -88,6 +88,18 @@ func (i *VLLMImporter) Import(details Details) (gallery.ModelConfig, error) { // Apply per-model-family inference parameter defaults config.ApplyInferenceDefaults(&modelConfig, details.URI) + // Auto-detect tool_parser and reasoning_parser for known model families. + // Surfacing them in the generated YAML lets users see and edit the choices. + parsers := config.MatchParserDefaults(details.URI) + if parsers != nil { + if tp, ok := parsers["tool_parser"]; ok { + modelConfig.Options = append(modelConfig.Options, "tool_parser:"+tp) + } + if rp, ok := parsers["reasoning_parser"]; ok { + modelConfig.Options = append(modelConfig.Options, "reasoning_parser:"+rp) + } + } + data, err := yaml.Marshal(modelConfig) if err != nil { return gallery.ModelConfig{}, err From 6786f05c643d161db8de3d2762da76b5750fc41e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 08:19:14 +0000 Subject: [PATCH 04/16] feat(vllm): wire native tool/reasoning parsers + chat deltas + logprobs - Use vLLM's ToolParserManager/ReasoningParserManager to extract structured output (tool calls, reasoning content) instead of reimplementing parsing - Convert proto Messages to dicts and pass tools to apply_chat_template - Emit ChatDelta with content/reasoning_content/tool_calls in Reply - Extract prompt_tokens, completion_tokens, and logprobs from output - Replace boolean GuidedDecoding with proper GuidedDecodingParams from Grammar - Add TokenizeString and Free RPC methods - Fix missing `time` import used by load_video() --- backend/python/vllm/backend.py | 235 +++++++++++++++++++++++++++++++-- backend/python/vllm/test.py | 83 ++++++++++++ 2 files changed, 309 insertions(+), 9 deletions(-) diff --git a/backend/python/vllm/backend.py b/backend/python/vllm/backend.py index 07323c4241b1..14d12af1e22e 100644 --- a/backend/python/vllm/backend.py +++ b/backend/python/vllm/backend.py @@ -5,6 +5,9 @@ import signal import sys import os +import json +import time +import gc from typing import List from PIL import Image @@ -26,6 +29,25 @@ import base64 import io +# Version-compat imports — wrap in try/except for older vLLM versions +try: + from vllm.tool_parsers import ToolParserManager + HAS_TOOL_PARSERS = True +except ImportError: + HAS_TOOL_PARSERS = False + +try: + from vllm.reasoning import ReasoningParserManager + HAS_REASONING_PARSERS = True +except ImportError: + HAS_REASONING_PARSERS = False + +try: + from vllm.sampling_params import GuidedDecodingParams + HAS_GUIDED_DECODING = True +except ImportError: + HAS_GUIDED_DECODING = False + _ONE_DAY_IN_SECONDS = 60 * 60 * 24 # If MAX_WORKERS are specified in the environment use it, otherwise default to 1 @@ -69,6 +91,35 @@ def generate(self,prompt, max_new_tokens): break return decoded_text + def _parse_options(self, options_list): + """Parse Options[] key:value string list into a dict.""" + opts = {} + for opt in options_list: + if ":" not in opt: + continue + key, value = opt.split(":", 1) + opts[key.strip()] = value.strip() + return opts + + def 
_messages_to_dicts(self, messages): + """Convert proto Messages to list of dicts suitable for apply_chat_template().""" + result = [] + for msg in messages: + d = {"role": msg.role, "content": msg.content or ""} + if msg.name: + d["name"] = msg.name + if msg.tool_call_id: + d["tool_call_id"] = msg.tool_call_id + if msg.reasoning_content: + d["reasoning_content"] = msg.reasoning_content + if msg.tool_calls: + try: + d["tool_calls"] = json.loads(msg.tool_calls) + except json.JSONDecodeError: + pass + result.append(d) + return result + def Health(self, request, context): """ Returns a health check message. @@ -141,6 +192,27 @@ async def LoadModel(self, request, context): ) except Exception as err: return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") + + # Parse options for parser selection + opts = self._parse_options(request.Options) + + # Instantiate tool/reasoning parser classes (they'll be instantiated per-request with tokenizer) + self.tool_parser_cls = None + self.reasoning_parser_cls = None + if HAS_TOOL_PARSERS and opts.get("tool_parser"): + try: + self.tool_parser_cls = ToolParserManager.get_tool_parser(opts["tool_parser"]) + print(f"Loaded tool_parser: {opts['tool_parser']}", file=sys.stderr) + except Exception as e: + print(f"Failed to load tool_parser {opts.get('tool_parser')}: {e}", file=sys.stderr) + + if HAS_REASONING_PARSERS and opts.get("reasoning_parser"): + try: + self.reasoning_parser_cls = ReasoningParserManager.get_reasoning_parser(opts["reasoning_parser"]) + print(f"Loaded reasoning_parser: {opts['reasoning_parser']}", file=sys.stderr) + except Exception as e: + print(f"Failed to load reasoning_parser {opts.get('reasoning_parser')}: {e}", file=sys.stderr) + print("Model loaded successfully", file=sys.stderr) return backend_pb2.Result(message="Model loaded successfully", success=True) @@ -197,6 +269,38 @@ async def PredictStream(self, request, context): finally: await iterations.aclose() + async def TokenizeString(self, request, context): + if not hasattr(self, 'tokenizer') or self.tokenizer is None: + context.set_code(grpc.StatusCode.FAILED_PRECONDITION) + context.set_details("Model/tokenizer not loaded") + return backend_pb2.TokenizationResponse() + try: + tokens = self.tokenizer.encode(request.Prompt) + return backend_pb2.TokenizationResponse(length=len(tokens), tokens=tokens) + except Exception as e: + context.set_code(grpc.StatusCode.INTERNAL) + context.set_details(str(e)) + return backend_pb2.TokenizationResponse() + + async def Free(self, request, context): + try: + if hasattr(self, 'llm'): + del self.llm + if hasattr(self, 'tokenizer'): + del self.tokenizer + self.tool_parser_cls = None + self.reasoning_parser_cls = None + gc.collect() + try: + import torch + if torch.cuda.is_available(): + torch.cuda.empty_cache() + except ImportError: + pass + return backend_pb2.Result(success=True, message="Model freed") + except Exception as e: + return backend_pb2.Result(success=False, message=str(e)) + async def _predict(self, request, context, streaming=False): # Build the sampling parameters # NOTE: this must stay in sync with the vllm backend @@ -222,7 +326,6 @@ async def _predict(self, request, context, streaming=False): "SkipSpecialTokens": "skip_special_tokens", "SpacesBetweenSpecialTokens": "spaces_between_special_tokens", "TruncatePromptTokens": "truncate_prompt_tokens", - "GuidedDecoding": "guided_decoding", } sampling_params = SamplingParams(top_p=0.9, max_tokens=200) @@ -233,6 +336,14 @@ async def _predict(self, request, context, 
streaming=False):
             if value not in (None, 0, [], False):
                 setattr(sampling_params, param_field, value)
 
+        # Guided decoding: use Grammar field to pass JSON schema or BNF
+        if HAS_GUIDED_DECODING and request.Grammar:
+            try:
+                json.loads(request.Grammar) # valid JSON = JSON schema
+                sampling_params.guided_decoding = GuidedDecodingParams(json=request.Grammar)
+            except json.JSONDecodeError:
+                sampling_params.guided_decoding = GuidedDecodingParams(grammar=request.Grammar)
+
         # Extract image paths and process images
         prompt = request.Prompt
 
@@ -244,7 +355,27 @@ async def _predict(self, request, context, streaming=False):
 
         # If tokenizer template is enabled and messages are provided instead of prompt, apply the tokenizer template
         if not request.Prompt and request.UseTokenizerTemplate and request.Messages:
-            prompt = self.tokenizer.apply_chat_template(request.Messages, tokenize=False, add_generation_prompt=True)
+            messages_dicts = self._messages_to_dicts(request.Messages)
+            template_kwargs = {"tokenize": False, "add_generation_prompt": True}
+
+            # Pass tools for tool calling
+            if request.Tools:
+                try:
+                    template_kwargs["tools"] = json.loads(request.Tools)
+                except json.JSONDecodeError:
+                    pass
+
+            # Enable thinking mode if requested
+            if request.Metadata.get("enable_thinking", "").lower() == "true":
+                template_kwargs["enable_thinking"] = True
+
+            try:
+                prompt = self.tokenizer.apply_chat_template(messages_dicts, **template_kwargs)
+            except TypeError:
+                # Some tokenizers don't support tools/enable_thinking kwargs — retry without them
+                prompt = self.tokenizer.apply_chat_template(
+                    messages_dicts, tokenize=False, add_generation_prompt=True
+                )
 
         # Generate text using the LLM engine
         request_id = random_uuid()
@@ -265,25 +396,26 @@ async def _predict(self, request, context, streaming=False):
 
         # Stream the results
         generated_text = ""
+        last_output = None
         try:
             async for request_output in outputs:
                 iteration_text = request_output.outputs[0].text
+                last_output = request_output
 
                 if streaming:
                     # Remove text already sent as vllm concatenates the text from previous yields
                     delta_iteration_text = iteration_text.removeprefix(generated_text)
                     # Send the partial result
-                    yield backend_pb2.Reply(message=bytes(delta_iteration_text, encoding='utf-8'))
+                    yield backend_pb2.Reply(
+                        message=bytes(delta_iteration_text, encoding='utf-8'),
+                        chat_deltas=[backend_pb2.ChatDelta(content=delta_iteration_text)],
+                    )
                     # Keep track of text generated
                     generated_text = iteration_text
         finally:
             await outputs.aclose()
 
-        # If streaming, we already sent everything
-        if streaming:
-            return
-
         # Remove the image files from /tmp folder
         for img_path in image_paths:
             try:
@@ -291,8 +423,93 @@ async def _predict(self, request, context, streaming=False):
             except Exception as e:
                 print(f"Error removing image file: {img_path}, {e}", file=sys.stderr)
 
-        # Sending the final generated text
-        yield backend_pb2.Reply(message=bytes(generated_text, encoding='utf-8'))
+        # Parse reasoning and tool calls from final text using vLLM's native parsers
+        content = generated_text
+        reasoning_content = ""
+        tool_calls_proto = []
+
+        if self.reasoning_parser_cls:
+            try:
+                rp = self.reasoning_parser_cls(self.tokenizer)
+                r, c = rp.extract_reasoning_content(generated_text, request=None)
+                reasoning_content = r or ""
+                content = c if c is not None else generated_text
+            except Exception as e:
+                print(f"Reasoning parser error: {e}", file=sys.stderr)
+
+        if self.tool_parser_cls and request.Tools:
+            try:
+                tools = json.loads(request.Tools)
+                tp = self.tool_parser_cls(self.tokenizer, tools=tools)
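+                # The parser returns an object exposing tools_called,
+                # tool_calls and the leftover content.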
+ info = tp.extract_tool_calls(content, request=None) + if info.tools_called: + content = info.content or "" + for i, tc in enumerate(info.tool_calls): + tool_calls_proto.append(backend_pb2.ToolCallDelta( + index=i, + id=tc.id, + name=tc.function.name, + arguments=tc.function.arguments, + )) + except Exception as e: + print(f"Tool parser error: {e}", file=sys.stderr) + + # Extract token counts + prompt_tokens = 0 + completion_tokens = 0 + if last_output is not None: + try: + prompt_tokens = len(last_output.prompt_token_ids or []) + except Exception: + pass + try: + completion_tokens = len(last_output.outputs[0].token_ids or []) + except Exception: + pass + + # Extract logprobs if requested + logprobs_bytes = b"" + if last_output is not None and request.Logprobs > 0: + try: + lp = last_output.outputs[0].logprobs + if lp: + logprobs_data = {"content": []} + for token_lp_dict in lp: + if token_lp_dict: + first_tok_id, first_lp = next(iter(token_lp_dict.items())) + logprobs_data["content"].append({ + "token": getattr(first_lp, "decoded_token", str(first_tok_id)), + "logprob": first_lp.logprob, + }) + logprobs_bytes = json.dumps(logprobs_data).encode("utf-8") + except Exception as e: + print(f"Logprobs extraction error: {e}", file=sys.stderr) + + chat_delta = backend_pb2.ChatDelta( + content=content, + reasoning_content=reasoning_content, + tool_calls=tool_calls_proto, + ) + + if streaming: + # Final chunk with structured data + yield backend_pb2.Reply( + message=b"", + prompt_tokens=prompt_tokens, + tokens=completion_tokens, + chat_deltas=[chat_delta], + logprobs=logprobs_bytes, + ) + return + + # Non-streaming: single Reply with everything + yield backend_pb2.Reply( + message=bytes(content, encoding='utf-8'), + prompt_tokens=prompt_tokens, + tokens=completion_tokens, + chat_deltas=[chat_delta], + logprobs=logprobs_bytes, + ) def load_image(self, image_path: str): """ diff --git a/backend/python/vllm/test.py b/backend/python/vllm/test.py index 827aa71a3e33..21aaf4cf785e 100644 --- a/backend/python/vllm/test.py +++ b/backend/python/vllm/test.py @@ -122,6 +122,89 @@ def test_sampling_params(self): self.tearDown() + def test_messages_to_dicts(self): + """ + Tests _messages_to_dicts conversion of proto Messages to dicts. + """ + import sys, os + sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + from backend import BackendServicer + servicer = BackendServicer() + msgs = [ + backend_pb2.Message(role="user", content="hello"), + backend_pb2.Message( + role="assistant", + content="", + tool_calls='[{"id":"call_1","type":"function","function":{"name":"foo","arguments":"{}"}}]', + reasoning_content="thinking...", + ), + backend_pb2.Message(role="tool", content="result", name="foo", tool_call_id="call_1"), + ] + result = servicer._messages_to_dicts(msgs) + self.assertEqual(len(result), 3) + self.assertEqual(result[0], {"role": "user", "content": "hello"}) + self.assertEqual(result[1]["reasoning_content"], "thinking...") + self.assertIsInstance(result[1]["tool_calls"], list) + self.assertEqual(result[1]["tool_calls"][0]["id"], "call_1") + self.assertEqual(result[2]["tool_call_id"], "call_1") + self.assertEqual(result[2]["name"], "foo") + + def test_parse_options(self): + """ + Tests _parse_options correctly parses key:value strings. 
+ """ + import sys, os + sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + from backend import BackendServicer + servicer = BackendServicer() + opts = servicer._parse_options([ + "tool_parser:hermes", + "reasoning_parser:deepseek_r1", + "invalid_no_colon", + "key_with_colons:a:b:c", + ]) + self.assertEqual(opts["tool_parser"], "hermes") + self.assertEqual(opts["reasoning_parser"], "deepseek_r1") + self.assertEqual(opts["key_with_colons"], "a:b:c") + self.assertNotIn("invalid_no_colon", opts) + + def test_tokenize_string(self): + """ + Tests the TokenizeString RPC returns valid tokens. + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m")) + self.assertTrue(response.success) + resp = stub.TokenizeString(backend_pb2.PredictOptions(Prompt="Hello world")) + self.assertGreater(resp.length, 0) + self.assertEqual(len(resp.tokens), resp.length) + except Exception as err: + print(err) + self.fail("TokenizeString service failed") + finally: + self.tearDown() + + def test_free(self): + """ + Tests the Free RPC doesn't crash. + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m")) + self.assertTrue(response.success) + free_resp = stub.Free(backend_pb2.HealthMessage()) + self.assertTrue(free_resp.success) + except Exception as err: + print(err) + self.fail("Free service failed") + finally: + self.tearDown() + def test_embedding(self): """ This method tests if the embeddings are generated successfully From b215843807aeeabe24431299909732be0a8d02ab Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 08:19:32 +0000 Subject: [PATCH 05/16] feat(vllm): CPU support + shared utils + vllm-omni feature parity - Split vllm install per acceleration: move generic `vllm` out of requirements-after.txt into per-profile after files (cublas12, hipblas, intel) and add CPU wheel URL for cpu-after.txt - requirements-cpu.txt now pulls torch==2.7.0+cpu from PyTorch CPU index - backend/index.yaml: register cpu-vllm / cpu-vllm-development variants - New backend/python/common/vllm_utils.py: shared parse_options, messages_to_dicts, setup_parsers helpers (used by both vllm backends) - vllm-omni: replace hardcoded chat template with tokenizer.apply_chat_template, wire native parsers via shared utils, emit ChatDelta with token counts, add TokenizeString and Free RPCs, detect CPU and set VLLM_TARGET_DEVICE - Add test_cpu_inference.py: standalone script to validate CPU build with a small model (Qwen2.5-0.5B-Instruct) --- backend/index.yaml | 12 ++ backend/python/common/vllm_utils.py | 84 +++++++++ backend/python/vllm-omni/backend.py | 178 +++++++++++++++--- backend/python/vllm/requirements-after.txt | 3 +- .../python/vllm/requirements-cpu-after.txt | 1 + backend/python/vllm/requirements-cpu.txt | 5 +- .../vllm/requirements-cublas12-after.txt | 1 + .../vllm/requirements-hipblas-after.txt | 1 + .../python/vllm/requirements-intel-after.txt | 1 + backend/python/vllm/test_cpu_inference.py | 101 ++++++++++ 10 files changed, 358 insertions(+), 29 deletions(-) create mode 100644 backend/python/common/vllm_utils.py create mode 100644 backend/python/vllm/requirements-cpu-after.txt create mode 100644 backend/python/vllm/requirements-hipblas-after.txt create mode 100644 
backend/python/vllm/requirements-intel-after.txt create mode 100644 backend/python/vllm/test_cpu_inference.py diff --git a/backend/index.yaml b/backend/index.yaml index a1f5688a81cc..d0f75a4ca3c9 100644 --- a/backend/index.yaml +++ b/backend/index.yaml @@ -197,6 +197,7 @@ amd: "rocm-vllm" intel: "intel-vllm" nvidia-cuda-12: "cuda12-vllm" + cpu: "cpu-vllm" - &vllm-omni name: "vllm-omni" license: apache-2.0 @@ -1563,6 +1564,7 @@ nvidia: "cuda12-vllm-development" amd: "rocm-vllm-development" intel: "intel-vllm-development" + cpu: "cpu-vllm-development" - !!merge <<: *vllm name: "cuda12-vllm" uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-vllm" @@ -1578,6 +1580,11 @@ uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-vllm" mirrors: - localai/localai-backends:latest-gpu-intel-vllm +- !!merge <<: *vllm + name: "cpu-vllm" + uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-vllm" + mirrors: + - localai/localai-backends:latest-cpu-vllm - !!merge <<: *vllm name: "cuda12-vllm-development" uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-vllm" @@ -1593,6 +1600,11 @@ uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-vllm" mirrors: - localai/localai-backends:master-gpu-intel-vllm +- !!merge <<: *vllm + name: "cpu-vllm-development" + uri: "quay.io/go-skynet/local-ai-backends:master-cpu-vllm" + mirrors: + - localai/localai-backends:master-cpu-vllm # vllm-omni - !!merge <<: *vllm-omni name: "vllm-omni-development" diff --git a/backend/python/common/vllm_utils.py b/backend/python/common/vllm_utils.py new file mode 100644 index 000000000000..bc05186639d2 --- /dev/null +++ b/backend/python/common/vllm_utils.py @@ -0,0 +1,84 @@ +"""Shared utilities for vLLM-based backends.""" +import json +import sys + + +def parse_options(options_list): + """Parse Options[] list of 'key:value' strings into a dict. + + Supports type inference for common cases (bool, int, float). + Used by LoadModel to extract backend-specific options. + """ + opts = {} + for opt in options_list: + if ":" not in opt: + continue + key, value = opt.split(":", 1) + key = key.strip() + value = value.strip() + # Try type conversion + if value.lower() in ("true", "false"): + opts[key] = value.lower() == "true" + else: + try: + opts[key] = int(value) + except ValueError: + try: + opts[key] = float(value) + except ValueError: + opts[key] = value + return opts + + +def messages_to_dicts(proto_messages): + """Convert proto Message objects to list of dicts for apply_chat_template(). + + Handles: role, content, name, tool_call_id, reasoning_content, tool_calls (JSON string -> list). + """ + result = [] + for msg in proto_messages: + d = {"role": msg.role, "content": msg.content or ""} + if msg.name: + d["name"] = msg.name + if msg.tool_call_id: + d["tool_call_id"] = msg.tool_call_id + if msg.reasoning_content: + d["reasoning_content"] = msg.reasoning_content + if msg.tool_calls: + try: + d["tool_calls"] = json.loads(msg.tool_calls) + except json.JSONDecodeError: + pass + result.append(d) + return result + + +def setup_parsers(opts): + """Return (tool_parser_cls, reasoning_parser_cls) tuple from opts dict. + + Uses vLLM's native ToolParserManager and ReasoningParserManager. + Returns (None, None) if vLLM is not installed or parsers not available. 
+ """ + tool_parser_cls = None + reasoning_parser_cls = None + + tool_parser_name = opts.get("tool_parser") + reasoning_parser_name = opts.get("reasoning_parser") + + if tool_parser_name: + try: + from vllm.tool_parsers import ToolParserManager + tool_parser_cls = ToolParserManager.get_tool_parser(tool_parser_name) + print(f"[vllm_utils] Loaded tool_parser: {tool_parser_name}", file=sys.stderr) + except Exception as e: + print(f"[vllm_utils] Failed to load tool_parser {tool_parser_name}: {e}", file=sys.stderr) + + if reasoning_parser_name: + try: + from vllm.reasoning import ReasoningParserManager + reasoning_parser_cls = ReasoningParserManager.get_reasoning_parser(reasoning_parser_name) + print(f"[vllm_utils] Loaded reasoning_parser: {reasoning_parser_name}", file=sys.stderr) + except Exception as e: + print(f"[vllm_utils] Failed to load reasoning_parser {reasoning_parser_name}: {e}", file=sys.stderr) + + return tool_parser_cls, reasoning_parser_cls diff --git a/backend/python/vllm-omni/backend.py b/backend/python/vllm-omni/backend.py index 96eb8a111571..646af2a2e942 100644 --- a/backend/python/vllm-omni/backend.py +++ b/backend/python/vllm-omni/backend.py @@ -17,6 +17,8 @@ import os import base64 import io +import json +import gc from PIL import Image import torch @@ -30,6 +32,7 @@ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'common')) sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'common')) from grpc_auth import get_auth_interceptors +from vllm_utils import parse_options, messages_to_dicts, setup_parsers from vllm_omni.entrypoints.omni import Omni @@ -148,23 +151,20 @@ def Health(self, request, context): def LoadModel(self, request, context): try: + # CPU detection: if no CUDA, default vLLM target device to CPU. 
+ try: + if not torch.cuda.is_available(): + os.environ.setdefault("VLLM_TARGET_DEVICE", "cpu") + os.environ.setdefault("VLLM_CPU_KVCACHE_SPACE", "4") + except Exception: + pass + print(f"Loading model {request.Model}...", file=sys.stderr) print(f"Request {request}", file=sys.stderr) - # Parse options from request.Options (key:value pairs) - self.options = {} - for opt in request.Options: - if ":" not in opt: - continue - key, value = opt.split(":", 1) - # Convert value to appropriate type - if is_float(value): - value = float(value) - elif is_int(value): - value = int(value) - elif value.lower() in ["true", "false"]: - value = value.lower() == "true" - self.options[key] = value + # Parse options from request.Options using shared helper + self.options = parse_options(request.Options) + opts = self.options print(f"Options: {self.options}", file=sys.stderr) @@ -244,6 +244,24 @@ def LoadModel(self, request, context): omni_kwargs["max_model_len"] = request.MaxModelLen self.omni = Omni(**omni_kwargs) + + # Load tokenizer for LLM/TTS so chat templates work + if self.model_type in ("llm", "tts"): + try: + from vllm.transformers_utils.tokenizer import get_tokenizer + self.tokenizer = get_tokenizer( + request.Model, + trust_remote_code=opts.get("trust_remote_code", False), + ) + except Exception as e: + print(f"Failed to load tokenizer: {e}", file=sys.stderr) + self.tokenizer = None + else: + self.tokenizer = None + + # Setup optional tool / reasoning parsers + self.tool_parser_cls, self.reasoning_parser_cls = setup_parsers(opts) + print("Model loaded successfully", file=sys.stderr) return backend_pb2.Result(message="Model loaded successfully", success=True) @@ -466,14 +484,32 @@ def _predict(self, request, context, streaming=False): # Extract prompt if request.Prompt: prompt = request.Prompt - elif request.Messages and request.UseTokenizerTemplate: - # Build prompt from messages (simplified - would need tokenizer for full template) - prompt = "" - for msg in request.Messages: - role = msg.role - content = msg.content - prompt += f"<|im_start|>{role}\n{content}<|im_end|>\n" - prompt += "<|im_start|>assistant\n" + elif request.Messages: + if getattr(self, "tokenizer", None) is not None: + messages_dicts = messages_to_dicts(request.Messages) + template_kwargs = {"tokenize": False, "add_generation_prompt": True} + if request.Tools: + try: + template_kwargs["tools"] = json.loads(request.Tools) + except json.JSONDecodeError: + pass + try: + if request.Metadata.get("enable_thinking", "").lower() == "true": + template_kwargs["enable_thinking"] = True + except Exception: + pass + try: + prompt = self.tokenizer.apply_chat_template(messages_dicts, **template_kwargs) + except TypeError: + prompt = self.tokenizer.apply_chat_template( + messages_dicts, tokenize=False, add_generation_prompt=True + ) + else: + # Fallback: basic template + prompt = "" + for msg in request.Messages: + prompt += f"<|im_start|>{msg.role}\n{msg.content}<|im_end|>\n" + prompt += "<|im_start|>assistant\n" else: yield backend_pb2.Reply(message=bytes("", 'utf-8')) return @@ -539,20 +575,79 @@ def _predict(self, request, context, streaming=False): # Call omni.generate() (returns generator for LLM mode) omni_generator = self.omni.generate([inputs], sampling_params_list) - # Extract text from outputs + # Extract text from outputs and track token usage generated_text = "" + prompt_tokens = 0 + completion_tokens = 0 for stage_outputs in omni_generator: if stage_outputs.final_output_type == "text": for output in stage_outputs.request_output: - 
text_output = output.outputs[0].text + completion = output.outputs[0] + text_output = completion.text + # Track tokens when available + try: + if getattr(output, "prompt_token_ids", None) is not None: + prompt_tokens = len(output.prompt_token_ids) + if getattr(completion, "token_ids", None) is not None: + completion_tokens = len(completion.token_ids) + except Exception: + pass if streaming: # Remove already sent text (vllm concatenates) delta_text = text_output.removeprefix(generated_text) - yield backend_pb2.Reply(message=bytes(delta_text, encoding='utf-8')) + yield backend_pb2.Reply( + message=bytes(delta_text, encoding='utf-8'), + tokens=completion_tokens, + prompt_tokens=prompt_tokens, + ) generated_text = text_output if not streaming: - yield backend_pb2.Reply(message=bytes(generated_text, encoding='utf-8')) + # Build optional ChatDelta with parsed reasoning / tool calls + chat_deltas = [] + content_text = generated_text + reasoning_text = "" + tool_call_deltas = [] + + if self.reasoning_parser_cls is not None: + try: + parser = self.reasoning_parser_cls(self.tokenizer) if self.tokenizer else self.reasoning_parser_cls() + reasoning_text, content_text = parser.extract_reasoning_content(content_text, request=None) + reasoning_text = reasoning_text or "" + content_text = content_text or "" + except Exception as e: + print(f"reasoning_parser failed: {e}", file=sys.stderr) + + if self.tool_parser_cls is not None: + try: + parser = self.tool_parser_cls(self.tokenizer) if self.tokenizer else self.tool_parser_cls() + tool_info = parser.extract_tool_calls(content_text, request=None) + if getattr(tool_info, "tools_called", False): + content_text = tool_info.content or "" + for tc in tool_info.tool_calls or []: + fn = getattr(tc, "function", None) + tool_call_deltas.append(backend_pb2.ToolCallDelta( + index=getattr(tc, "index", 0) or 0, + id=getattr(tc, "id", "") or "", + name=getattr(fn, "name", "") if fn else "", + arguments=getattr(fn, "arguments", "") if fn else "", + )) + except Exception as e: + print(f"tool_parser failed: {e}", file=sys.stderr) + + if self.tool_parser_cls is not None or self.reasoning_parser_cls is not None: + chat_deltas.append(backend_pb2.ChatDelta( + content=content_text, + reasoning_content=reasoning_text, + tool_calls=tool_call_deltas, + )) + + yield backend_pb2.Reply( + message=bytes(generated_text, encoding='utf-8'), + tokens=completion_tokens, + prompt_tokens=prompt_tokens, + chat_deltas=chat_deltas, + ) except Exception as err: print(f"Error in Predict: {err}", file=sys.stderr) @@ -647,6 +742,37 @@ def TTS(self, request, context): traceback.print_exc() return backend_pb2.Result(success=False, message=f"Error generating TTS: {err}") + def TokenizeString(self, request, context): + if not hasattr(self, 'tokenizer') or self.tokenizer is None: + context.set_code(grpc.StatusCode.FAILED_PRECONDITION) + context.set_details("Model/tokenizer not loaded") + return backend_pb2.TokenizationResponse() + try: + tokens = self.tokenizer.encode(request.Prompt) + return backend_pb2.TokenizationResponse(length=len(tokens), tokens=tokens) + except Exception as e: + context.set_code(grpc.StatusCode.INTERNAL) + context.set_details(str(e)) + return backend_pb2.TokenizationResponse() + + def Free(self, request, context): + try: + if hasattr(self, 'omni'): + del self.omni + if hasattr(self, 'tokenizer'): + del self.tokenizer + self.tool_parser_cls = None + self.reasoning_parser_cls = None + gc.collect() + try: + if torch.cuda.is_available(): + torch.cuda.empty_cache() + except Exception: 
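+                # ignore failures when torch has no CUDA support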
+ pass + return backend_pb2.Result(success=True, message="Model freed") + except Exception as e: + return backend_pb2.Result(success=False, message=str(e)) + def serve(address): server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS), diff --git a/backend/python/vllm/requirements-after.txt b/backend/python/vllm/requirements-after.txt index 76f11f154037..b5000e6ca77a 100644 --- a/backend/python/vllm/requirements-after.txt +++ b/backend/python/vllm/requirements-after.txt @@ -1 +1,2 @@ -vllm \ No newline at end of file +# vllm is installed per-acceleration in requirements-{profile}-after.txt +# (cublas12, hipblas, intel, cpu) diff --git a/backend/python/vllm/requirements-cpu-after.txt b/backend/python/vllm/requirements-cpu-after.txt new file mode 100644 index 000000000000..20cf3d395fb7 --- /dev/null +++ b/backend/python/vllm/requirements-cpu-after.txt @@ -0,0 +1 @@ +https://github.com/vllm-project/vllm/releases/download/v0.8.5/vllm-0.8.5+cpu-cp38-abi3-manylinux_2_35_x86_64.whl diff --git a/backend/python/vllm/requirements-cpu.txt b/backend/python/vllm/requirements-cpu.txt index 16c7cbac50c0..d1e8822452c6 100644 --- a/backend/python/vllm/requirements-cpu.txt +++ b/backend/python/vllm/requirements-cpu.txt @@ -1,3 +1,4 @@ accelerate -torch==2.7.0 -transformers \ No newline at end of file +--extra-index-url https://download.pytorch.org/whl/cpu +torch==2.7.0+cpu +transformers diff --git a/backend/python/vllm/requirements-cublas12-after.txt b/backend/python/vllm/requirements-cublas12-after.txt index 9251ba608461..cab27c888e27 100644 --- a/backend/python/vllm/requirements-cublas12-after.txt +++ b/backend/python/vllm/requirements-cublas12-after.txt @@ -1 +1,2 @@ https://github.com/Dao-AILab/flash-attention/releases/download/v2.8.3/flash_attn-2.8.3+cu12torch2.7cxx11abiTRUE-cp310-cp310-linux_x86_64.whl +vllm diff --git a/backend/python/vllm/requirements-hipblas-after.txt b/backend/python/vllm/requirements-hipblas-after.txt new file mode 100644 index 000000000000..e7a6c7781dce --- /dev/null +++ b/backend/python/vllm/requirements-hipblas-after.txt @@ -0,0 +1 @@ +vllm diff --git a/backend/python/vllm/requirements-intel-after.txt b/backend/python/vllm/requirements-intel-after.txt new file mode 100644 index 000000000000..e7a6c7781dce --- /dev/null +++ b/backend/python/vllm/requirements-intel-after.txt @@ -0,0 +1 @@ +vllm diff --git a/backend/python/vllm/test_cpu_inference.py b/backend/python/vllm/test_cpu_inference.py new file mode 100644 index 000000000000..ff606b5bf167 --- /dev/null +++ b/backend/python/vllm/test_cpu_inference.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +"""End-to-end CPU inference smoke test for the vllm backend. + +Spawns the gRPC backend server, loads a small Qwen model, runs Predict, +TokenizeString, and Free, and verifies non-empty output. + +Usage: + python test_cpu_inference.py [--model MODEL_ID] [--addr HOST:PORT] + +Defaults to Qwen/Qwen2.5-0.5B-Instruct (Qwen3.5-0.6B is not yet published +on the HuggingFace hub at the time of writing). 
+""" +import argparse +import os +import subprocess +import sys +import time + +import grpc + +# Make sibling backend_pb2 importable +HERE = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, HERE) + +import backend_pb2 +import backend_pb2_grpc + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--model", default=os.environ.get("TEST_MODEL", "Qwen/Qwen2.5-0.5B-Instruct")) + parser.add_argument("--addr", default="127.0.0.1:50099") + parser.add_argument("--prompt", default="Hello, how are you?") + args = parser.parse_args() + + # Force CPU mode for vLLM + env = os.environ.copy() + env.setdefault("VLLM_TARGET_DEVICE", "cpu") + env.setdefault("VLLM_CPU_KVCACHE_SPACE", "4") + + server_proc = subprocess.Popen( + [sys.executable, os.path.join(HERE, "backend.py"), "--addr", args.addr], + env=env, + stdout=sys.stdout, + stderr=sys.stderr, + ) + + try: + # Wait for the server to come up + deadline = time.time() + 30 + channel = None + while time.time() < deadline: + try: + channel = grpc.insecure_channel(args.addr) + grpc.channel_ready_future(channel).result(timeout=2) + break + except Exception: + time.sleep(0.5) + if channel is None: + raise RuntimeError("backend server did not start in time") + + stub = backend_pb2_grpc.BackendStub(channel) + + print(f"[test] LoadModel({args.model})", flush=True) + load_resp = stub.LoadModel(backend_pb2.ModelOptions( + Model=args.model, + ContextSize=2048, + ), timeout=900) + assert load_resp.success, f"LoadModel failed: {load_resp.message}" + + print(f"[test] Predict prompt={args.prompt!r}", flush=True) + reply = stub.Predict(backend_pb2.PredictOptions( + Prompt=args.prompt, + Tokens=64, + Temperature=0.7, + TopP=0.9, + ), timeout=600) + text = reply.message.decode("utf-8") + print(f"[test] Predict output: {text!r}", flush=True) + assert text.strip(), "Predict returned empty text" + + print("[test] TokenizeString", flush=True) + tok_resp = stub.TokenizeString(backend_pb2.PredictOptions(Prompt="hello world"), timeout=30) + print(f"[test] TokenizeString length={tok_resp.length}", flush=True) + assert tok_resp.length > 0 + + print("[test] Free", flush=True) + free_resp = stub.Free(backend_pb2.MemoryUsageData(), timeout=30) + assert free_resp.success, f"Free failed: {free_resp.message}" + + print("[test] PASS", flush=True) + finally: + server_proc.terminate() + try: + server_proc.wait(timeout=10) + except subprocess.TimeoutExpired: + server_proc.kill() + + +if __name__ == "__main__": + main() From c2f73a987eab338bbeb22e72f6eab09732510bf1 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 08:58:57 +0000 Subject: [PATCH 06/16] fix(vllm): CPU build compatibility with vllm 0.14.1 Validated end-to-end on CPU with Qwen2.5-0.5B-Instruct (LoadModel, Predict, TokenizeString, Free all working). - requirements-cpu-after.txt: pin vllm to 0.14.1+cpu (pre-built wheel from GitHub releases) for x86_64 and aarch64. vllm 0.14.1 is the newest CPU wheel whose torch dependency resolves against published PyTorch builds (torch==2.9.1+cpu). Later vllm CPU wheels currently require torch==2.10.0+cpu which is only available on the PyTorch test channel with incompatible torchvision. - requirements-cpu.txt: bump torch to 2.9.1+cpu, add torchvision/torchaudio so uv resolves them consistently from the PyTorch CPU index. - install.sh: add --index-strategy=unsafe-best-match for CPU builds so uv can mix the PyTorch index and PyPI for transitive deps (matches the existing intel profile behaviour). 
- backend.py LoadModel: vllm >= 0.14 removed AsyncLLMEngine.get_model_config
  so the old code path errored out with AttributeError on model load.
  Switch to the new get_tokenizer()/tokenizer accessor with a fallback to
  building the tokenizer directly from request.Model.
---
 backend/python/vllm/backend.py             | 27 ++++++++++++++-----
 backend/python/vllm/install.sh             |  6 +++++
 .../python/vllm/requirements-cpu-after.txt |  3 ++-
 backend/python/vllm/requirements-cpu.txt   |  6 +++--
 4 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/backend/python/vllm/backend.py b/backend/python/vllm/backend.py
index 14d12af1e22e..cfb69a684348 100644
--- a/backend/python/vllm/backend.py
+++ b/backend/python/vllm/backend.py
@@ -183,13 +183,26 @@ async def LoadModel(self, request, context):
             return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
 
         try:
-            engine_model_config = await self.llm.get_model_config()
-            self.tokenizer = get_tokenizer(
-                engine_model_config.tokenizer,
-                tokenizer_mode=engine_model_config.tokenizer_mode,
-                trust_remote_code=engine_model_config.trust_remote_code,
-                truncation_side="left",
-            )
+            # vLLM >= 0.14 removed get_model_config() on AsyncLLM; the tokenizer
+            # is either already loaded on the engine or can be built from the
+            # Model name directly.
+            tokenizer = None
+            if hasattr(self.llm, "get_tokenizer"):
+                try:
+                    tokenizer = await self.llm.get_tokenizer()
+                except TypeError:
+                    tokenizer = self.llm.get_tokenizer()
+                except Exception:
+                    tokenizer = None
+            if tokenizer is None and hasattr(self.llm, "tokenizer"):
+                tokenizer = self.llm.tokenizer
+            if tokenizer is None:
+                tokenizer = get_tokenizer(
+                    request.Model,
+                    trust_remote_code=bool(request.TrustRemoteCode),
+                    truncation_side="left",
+                )
+            self.tokenizer = tokenizer
         except Exception as err:
             return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
 
diff --git a/backend/python/vllm/install.sh b/backend/python/vllm/install.sh
index 7dcd29db4a92..66a809a92650 100755
--- a/backend/python/vllm/install.sh
+++ b/backend/python/vllm/install.sh
@@ -26,6 +26,12 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
 	EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
 fi
 
+# CPU builds need unsafe-best-match so uv can mix the PyTorch CPU index and
+# PyPI when resolving torch and vllm's transitive dependencies.
+if [ "x${BUILD_PROFILE}" == "xcpu" ]; then
+	EXTRA_PIP_INSTALL_FLAGS+=" --index-strategy=unsafe-best-match"
+fi
+
 # We don't embed this into the images as it is a large dependency and not always needed.
 # Besides, the speed inference are not actually usable in the current state for production use-cases.
if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE:-}" == "xtrue" ]; then diff --git a/backend/python/vllm/requirements-cpu-after.txt b/backend/python/vllm/requirements-cpu-after.txt index 20cf3d395fb7..e5e4908f72f9 100644 --- a/backend/python/vllm/requirements-cpu-after.txt +++ b/backend/python/vllm/requirements-cpu-after.txt @@ -1 +1,2 @@ -https://github.com/vllm-project/vllm/releases/download/v0.8.5/vllm-0.8.5+cpu-cp38-abi3-manylinux_2_35_x86_64.whl +vllm @ https://github.com/vllm-project/vllm/releases/download/v0.14.1/vllm-0.14.1+cpu-cp38-abi3-manylinux_2_35_x86_64.whl ; platform_machine == "x86_64" +vllm @ https://github.com/vllm-project/vllm/releases/download/v0.14.1/vllm-0.14.1+cpu-cp38-abi3-manylinux_2_35_aarch64.whl ; platform_machine == "aarch64" diff --git a/backend/python/vllm/requirements-cpu.txt b/backend/python/vllm/requirements-cpu.txt index d1e8822452c6..5eeb8a708db6 100644 --- a/backend/python/vllm/requirements-cpu.txt +++ b/backend/python/vllm/requirements-cpu.txt @@ -1,4 +1,6 @@ -accelerate --extra-index-url https://download.pytorch.org/whl/cpu -torch==2.7.0+cpu +accelerate +torch==2.9.1+cpu +torchvision +torchaudio transformers From c99188f10694d8f6af7adaf5be5079dd1e84f7e6 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 09:15:16 +0000 Subject: [PATCH 07/16] fix(vllm): tool parser constructor compat + e2e tool calling test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Concrete vLLM tool parsers override the abstract base's __init__ and drop the tools kwarg (e.g. Hermes2ProToolParser only takes tokenizer). Instantiating with tools= raised TypeError which was silently caught, leaving chat_deltas.tool_calls empty. Retry the constructor without the tools kwarg on TypeError — tools aren't required by these parsers since extract_tool_calls finds tool syntax in the raw model output directly. Validated with Qwen/Qwen2.5-0.5B-Instruct + hermes parser on CPU: the backend correctly returns ToolCallDelta{name='get_weather', arguments='{"location": "Paris, France"}'} in ChatDelta. test_tool_calls.py is a standalone smoke test that spawns the gRPC backend, sends a chat completion with tools, and asserts the response contains a structured tool call. --- backend/python/vllm/backend.py | 8 +- backend/python/vllm/test_tool_calls.py | 134 +++++++++++++++++++++++++ 2 files changed, 141 insertions(+), 1 deletion(-) create mode 100644 backend/python/vllm/test_tool_calls.py diff --git a/backend/python/vllm/backend.py b/backend/python/vllm/backend.py index cfb69a684348..95ae95a9d4e6 100644 --- a/backend/python/vllm/backend.py +++ b/backend/python/vllm/backend.py @@ -453,7 +453,13 @@ async def _predict(self, request, context, streaming=False): if self.tool_parser_cls and request.Tools: try: tools = json.loads(request.Tools) - tp = self.tool_parser_cls(self.tokenizer, tools=tools) + # Some concrete parsers only accept the tokenizer; only the + # abstract base declares the tools kwarg. Try with tools first, + # fall back to tokenizer-only. 
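+                # (Hermes2ProToolParser, for example, accepts only the tokenizer.)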
+ try: + tp = self.tool_parser_cls(self.tokenizer, tools=tools) + except TypeError: + tp = self.tool_parser_cls(self.tokenizer) info = tp.extract_tool_calls(content, request=None) if info.tools_called: content = info.content or "" diff --git a/backend/python/vllm/test_tool_calls.py b/backend/python/vllm/test_tool_calls.py new file mode 100644 index 000000000000..12b36f6f26ca --- /dev/null +++ b/backend/python/vllm/test_tool_calls.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +"""End-to-end CPU tool-calling test for the vllm backend. + +Loads Qwen2.5-0.5B-Instruct with the hermes tool parser, sends a chat +completion with a `get_weather` tool, and checks that the reply's +ChatDelta contains a ToolCallDelta for that function. +""" +import argparse +import json +import os +import subprocess +import sys +import time + +import grpc + +HERE = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, HERE) + +import backend_pb2 +import backend_pb2_grpc + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--model", default="Qwen/Qwen2.5-0.5B-Instruct") + parser.add_argument("--addr", default="127.0.0.1:50098") + args = parser.parse_args() + + env = os.environ.copy() + env.setdefault("VLLM_TARGET_DEVICE", "cpu") + env.setdefault("VLLM_CPU_KVCACHE_SPACE", "4") + + server_proc = subprocess.Popen( + [sys.executable, os.path.join(HERE, "backend.py"), "--addr", args.addr], + env=env, + stdout=sys.stdout, + stderr=sys.stderr, + ) + + try: + deadline = time.time() + 30 + channel = None + while time.time() < deadline: + try: + channel = grpc.insecure_channel(args.addr) + grpc.channel_ready_future(channel).result(timeout=2) + break + except Exception: + time.sleep(0.5) + if channel is None: + raise RuntimeError("backend server did not start in time") + + stub = backend_pb2_grpc.BackendStub(channel) + + print(f"[test] LoadModel({args.model}) with hermes tool_parser", flush=True) + load_resp = stub.LoadModel(backend_pb2.ModelOptions( + Model=args.model, + ContextSize=2048, + Options=["tool_parser:hermes"], + ), timeout=900) + assert load_resp.success, f"LoadModel failed: {load_resp.message}" + + tools = [{ + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the current weather for a location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + }, + "required": ["location"], + }, + }, + }] + + messages = [ + backend_pb2.Message(role="system", content="You are a helpful assistant. 
Use the get_weather tool when the user asks about weather."), + backend_pb2.Message(role="user", content="What's the weather like in Paris, France?"), + ] + + print("[test] Predict with tool definitions", flush=True) + reply = stub.Predict(backend_pb2.PredictOptions( + Messages=messages, + Tools=json.dumps(tools), + ToolChoice="auto", + UseTokenizerTemplate=True, + Tokens=200, + Temperature=0.1, + ), timeout=600) + + text = reply.message.decode("utf-8") + print(f"[test] Raw message: {text!r}", flush=True) + print(f"[test] prompt_tokens={reply.prompt_tokens} tokens={reply.tokens}", flush=True) + print(f"[test] chat_deltas count: {len(reply.chat_deltas)}", flush=True) + + tool_calls_seen = [] + for delta in reply.chat_deltas: + print(f"[test] delta.content={delta.content!r}", flush=True) + print(f"[test] delta.reasoning_content={delta.reasoning_content!r}", flush=True) + for tc in delta.tool_calls: + print(f"[test] tool_call idx={tc.index} id={tc.id!r} name={tc.name!r} args={tc.arguments!r}", flush=True) + tool_calls_seen.append(tc) + + # Verify at least one tool call was extracted + assert len(tool_calls_seen) > 0, ( + "No tool calls in ChatDelta. " + f"Raw text was: {text!r}" + ) + assert any(tc.name == "get_weather" for tc in tool_calls_seen), ( + f"Expected get_weather tool call, got: {[tc.name for tc in tool_calls_seen]}" + ) + + print("[test] Free", flush=True) + stub.Free(backend_pb2.HealthMessage(), timeout=30) + + print("[test] PASS", flush=True) + return 0 + + finally: + try: + server_proc.terminate() + server_proc.wait(timeout=10) + except Exception: + server_proc.kill() + + +if __name__ == "__main__": + sys.exit(main()) From 034a60bf76b9659f8be0d6ff0e7a4791bd7ad3ae Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 09:43:04 +0000 Subject: [PATCH 08/16] ci(backend): build cpu-vllm container image Add the cpu-vllm variant to the backend container build matrix so the image registered in backend/index.yaml (cpu-vllm / cpu-vllm-development) is actually produced by CI. Follows the same pattern as the other CPU python backends (cpu-diffusers, cpu-chatterbox, etc.) with build-type='' and no CUDA. backend_pr.yml auto-picks this up via its matrix filter from backend.yml. --- .github/workflows/backend.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index d89ee06bf1ec..12dcc85f1f43 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -53,6 +53,19 @@ jobs: dockerfile: "./backend/Dockerfile.python" context: "./" ubuntu-version: '2204' + - build-type: '' + cuda-major-version: "" + cuda-minor-version: "" + platforms: 'linux/amd64' + tag-latest: 'auto' + tag-suffix: '-cpu-vllm' + runs-on: 'ubuntu-latest' + base-image: "ubuntu:24.04" + skip-drivers: 'true' + backend: "vllm" + dockerfile: "./backend/Dockerfile.python" + context: "./" + ubuntu-version: '2404' - build-type: '' cuda-major-version: "" cuda-minor-version: "" From e7f406169a765b98463eecb0dad78f3909ba39d2 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 14:51:58 +0000 Subject: [PATCH 09/16] test(e2e-backends): add tools capability + HF model name support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extends tests/e2e-backends to cover backends that: - Resolve HuggingFace model ids natively (vllm, vllm-omni) instead of loading a local file: BACKEND_TEST_MODEL_NAME is passed verbatim as ModelOptions.Model with no download/ModelFile. 
- Parse tool calls into ChatDelta.tool_calls: new "tools" capability sends a Predict with a get_weather function definition and asserts the Reply contains a matching ToolCallDelta. Uses UseTokenizerTemplate with OpenAI-style Messages so the backend can wire tools into the model's chat template. - Need backend-specific Options[]: BACKEND_TEST_OPTIONS lets a test set e.g. "tool_parser:hermes,reasoning_parser:qwen3" at LoadModel time. Adds make target test-extra-backend-vllm that: - docker-build-vllm - loads Qwen/Qwen2.5-0.5B-Instruct - runs health,load,predict,stream,tools with tool_parser:hermes Drops backend/python/vllm/test_{cpu_inference,tool_calls}.py — those standalone scripts were scaffolding used while bringing up the Python backend; the e2e-backends harness now covers the same ground uniformly alongside llama-cpp and ik-llama-cpp. --- Makefile | 21 +++- backend/python/vllm/test_cpu_inference.py | 101 ---------------- backend/python/vllm/test_tool_calls.py | 134 ---------------------- tests/e2e-backends/backend_test.go | 132 +++++++++++++++++++-- 4 files changed, 141 insertions(+), 247 deletions(-) delete mode 100644 backend/python/vllm/test_cpu_inference.py delete mode 100644 backend/python/vllm/test_tool_calls.py diff --git a/Makefile b/Makefile index 6dce83efdaa6..7f61666f58ea 100644 --- a/Makefile +++ b/Makefile @@ -466,8 +466,14 @@ test-extra: prepare-test-extra ## BACKEND_IMAGE Required. Docker image to test, e.g. local-ai-backend:llama-cpp. ## BACKEND_TEST_MODEL_URL URL of a model file to download and load. ## BACKEND_TEST_MODEL_FILE Path to an already-downloaded model (skips download). +## BACKEND_TEST_MODEL_NAME HuggingFace repo id (e.g. Qwen/Qwen2.5-0.5B-Instruct). +## Use this instead of MODEL_URL for backends that +## resolve HF model ids natively (vllm, vllm-omni). ## BACKEND_TEST_CAPS Comma-separated capabilities, default "health,load,predict,stream". +## Adds "tools" to exercise ChatDelta tool call extraction. ## BACKEND_TEST_PROMPT Override the prompt used in predict/stream specs. +## BACKEND_TEST_OPTIONS Comma-separated Options[] entries forwarded to LoadModel, +## e.g. "tool_parser:hermes,reasoning_parser:qwen3". ## ## Direct usage (image already built, no docker-build-* dependency): ## @@ -486,9 +492,13 @@ test-extra-backend: protogen-go BACKEND_IMAGE="$$BACKEND_IMAGE" \ BACKEND_TEST_MODEL_URL="$${BACKEND_TEST_MODEL_URL:-$(BACKEND_TEST_MODEL_URL)}" \ BACKEND_TEST_MODEL_FILE="$$BACKEND_TEST_MODEL_FILE" \ + BACKEND_TEST_MODEL_NAME="$$BACKEND_TEST_MODEL_NAME" \ BACKEND_TEST_CAPS="$$BACKEND_TEST_CAPS" \ BACKEND_TEST_PROMPT="$$BACKEND_TEST_PROMPT" \ - go test -v -timeout 15m ./tests/e2e-backends/... + BACKEND_TEST_OPTIONS="$$BACKEND_TEST_OPTIONS" \ + BACKEND_TEST_TOOL_PROMPT="$$BACKEND_TEST_TOOL_PROMPT" \ + BACKEND_TEST_TOOL_NAME="$$BACKEND_TEST_TOOL_NAME" \ + go test -v -timeout 30m ./tests/e2e-backends/... ## Convenience wrappers: build the image, then exercise it. test-extra-backend-llama-cpp: docker-build-llama-cpp @@ -497,6 +507,15 @@ test-extra-backend-llama-cpp: docker-build-llama-cpp test-extra-backend-ik-llama-cpp: docker-build-ik-llama-cpp BACKEND_IMAGE=local-ai-backend:ik-llama-cpp $(MAKE) test-extra-backend +## vllm is resolved from a HuggingFace model id (no file download) and +## exercises Predict + streaming + tool-call extraction via the hermes parser. 
+test-extra-backend-vllm: docker-build-vllm + BACKEND_IMAGE=local-ai-backend:vllm \ + BACKEND_TEST_MODEL_NAME=Qwen/Qwen2.5-0.5B-Instruct \ + BACKEND_TEST_CAPS=health,load,predict,stream,tools \ + BACKEND_TEST_OPTIONS=tool_parser:hermes \ + $(MAKE) test-extra-backend + DOCKER_IMAGE?=local-ai IMAGE_TYPE?=core BASE_IMAGE?=ubuntu:24.04 diff --git a/backend/python/vllm/test_cpu_inference.py b/backend/python/vllm/test_cpu_inference.py deleted file mode 100644 index ff606b5bf167..000000000000 --- a/backend/python/vllm/test_cpu_inference.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python3 -"""End-to-end CPU inference smoke test for the vllm backend. - -Spawns the gRPC backend server, loads a small Qwen model, runs Predict, -TokenizeString, and Free, and verifies non-empty output. - -Usage: - python test_cpu_inference.py [--model MODEL_ID] [--addr HOST:PORT] - -Defaults to Qwen/Qwen2.5-0.5B-Instruct (Qwen3.5-0.6B is not yet published -on the HuggingFace hub at the time of writing). -""" -import argparse -import os -import subprocess -import sys -import time - -import grpc - -# Make sibling backend_pb2 importable -HERE = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(0, HERE) - -import backend_pb2 -import backend_pb2_grpc - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--model", default=os.environ.get("TEST_MODEL", "Qwen/Qwen2.5-0.5B-Instruct")) - parser.add_argument("--addr", default="127.0.0.1:50099") - parser.add_argument("--prompt", default="Hello, how are you?") - args = parser.parse_args() - - # Force CPU mode for vLLM - env = os.environ.copy() - env.setdefault("VLLM_TARGET_DEVICE", "cpu") - env.setdefault("VLLM_CPU_KVCACHE_SPACE", "4") - - server_proc = subprocess.Popen( - [sys.executable, os.path.join(HERE, "backend.py"), "--addr", args.addr], - env=env, - stdout=sys.stdout, - stderr=sys.stderr, - ) - - try: - # Wait for the server to come up - deadline = time.time() + 30 - channel = None - while time.time() < deadline: - try: - channel = grpc.insecure_channel(args.addr) - grpc.channel_ready_future(channel).result(timeout=2) - break - except Exception: - time.sleep(0.5) - if channel is None: - raise RuntimeError("backend server did not start in time") - - stub = backend_pb2_grpc.BackendStub(channel) - - print(f"[test] LoadModel({args.model})", flush=True) - load_resp = stub.LoadModel(backend_pb2.ModelOptions( - Model=args.model, - ContextSize=2048, - ), timeout=900) - assert load_resp.success, f"LoadModel failed: {load_resp.message}" - - print(f"[test] Predict prompt={args.prompt!r}", flush=True) - reply = stub.Predict(backend_pb2.PredictOptions( - Prompt=args.prompt, - Tokens=64, - Temperature=0.7, - TopP=0.9, - ), timeout=600) - text = reply.message.decode("utf-8") - print(f"[test] Predict output: {text!r}", flush=True) - assert text.strip(), "Predict returned empty text" - - print("[test] TokenizeString", flush=True) - tok_resp = stub.TokenizeString(backend_pb2.PredictOptions(Prompt="hello world"), timeout=30) - print(f"[test] TokenizeString length={tok_resp.length}", flush=True) - assert tok_resp.length > 0 - - print("[test] Free", flush=True) - free_resp = stub.Free(backend_pb2.MemoryUsageData(), timeout=30) - assert free_resp.success, f"Free failed: {free_resp.message}" - - print("[test] PASS", flush=True) - finally: - server_proc.terminate() - try: - server_proc.wait(timeout=10) - except subprocess.TimeoutExpired: - server_proc.kill() - - -if __name__ == "__main__": - main() diff --git a/backend/python/vllm/test_tool_calls.py 
b/backend/python/vllm/test_tool_calls.py deleted file mode 100644 index 12b36f6f26ca..000000000000 --- a/backend/python/vllm/test_tool_calls.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python3 -"""End-to-end CPU tool-calling test for the vllm backend. - -Loads Qwen2.5-0.5B-Instruct with the hermes tool parser, sends a chat -completion with a `get_weather` tool, and checks that the reply's -ChatDelta contains a ToolCallDelta for that function. -""" -import argparse -import json -import os -import subprocess -import sys -import time - -import grpc - -HERE = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(0, HERE) - -import backend_pb2 -import backend_pb2_grpc - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--model", default="Qwen/Qwen2.5-0.5B-Instruct") - parser.add_argument("--addr", default="127.0.0.1:50098") - args = parser.parse_args() - - env = os.environ.copy() - env.setdefault("VLLM_TARGET_DEVICE", "cpu") - env.setdefault("VLLM_CPU_KVCACHE_SPACE", "4") - - server_proc = subprocess.Popen( - [sys.executable, os.path.join(HERE, "backend.py"), "--addr", args.addr], - env=env, - stdout=sys.stdout, - stderr=sys.stderr, - ) - - try: - deadline = time.time() + 30 - channel = None - while time.time() < deadline: - try: - channel = grpc.insecure_channel(args.addr) - grpc.channel_ready_future(channel).result(timeout=2) - break - except Exception: - time.sleep(0.5) - if channel is None: - raise RuntimeError("backend server did not start in time") - - stub = backend_pb2_grpc.BackendStub(channel) - - print(f"[test] LoadModel({args.model}) with hermes tool_parser", flush=True) - load_resp = stub.LoadModel(backend_pb2.ModelOptions( - Model=args.model, - ContextSize=2048, - Options=["tool_parser:hermes"], - ), timeout=900) - assert load_resp.success, f"LoadModel failed: {load_resp.message}" - - tools = [{ - "type": "function", - "function": { - "name": "get_weather", - "description": "Get the current weather for a location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA", - }, - }, - "required": ["location"], - }, - }, - }] - - messages = [ - backend_pb2.Message(role="system", content="You are a helpful assistant. Use the get_weather tool when the user asks about weather."), - backend_pb2.Message(role="user", content="What's the weather like in Paris, France?"), - ] - - print("[test] Predict with tool definitions", flush=True) - reply = stub.Predict(backend_pb2.PredictOptions( - Messages=messages, - Tools=json.dumps(tools), - ToolChoice="auto", - UseTokenizerTemplate=True, - Tokens=200, - Temperature=0.1, - ), timeout=600) - - text = reply.message.decode("utf-8") - print(f"[test] Raw message: {text!r}", flush=True) - print(f"[test] prompt_tokens={reply.prompt_tokens} tokens={reply.tokens}", flush=True) - print(f"[test] chat_deltas count: {len(reply.chat_deltas)}", flush=True) - - tool_calls_seen = [] - for delta in reply.chat_deltas: - print(f"[test] delta.content={delta.content!r}", flush=True) - print(f"[test] delta.reasoning_content={delta.reasoning_content!r}", flush=True) - for tc in delta.tool_calls: - print(f"[test] tool_call idx={tc.index} id={tc.id!r} name={tc.name!r} args={tc.arguments!r}", flush=True) - tool_calls_seen.append(tc) - - # Verify at least one tool call was extracted - assert len(tool_calls_seen) > 0, ( - "No tool calls in ChatDelta. 
" - f"Raw text was: {text!r}" - ) - assert any(tc.name == "get_weather" for tc in tool_calls_seen), ( - f"Expected get_weather tool call, got: {[tc.name for tc in tool_calls_seen]}" - ) - - print("[test] Free", flush=True) - stub.Free(backend_pb2.HealthMessage(), timeout=30) - - print("[test] PASS", flush=True) - return 0 - - finally: - try: - server_proc.terminate() - server_proc.wait(timeout=10) - except Exception: - server_proc.kill() - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/tests/e2e-backends/backend_test.go b/tests/e2e-backends/backend_test.go index a800a7ab549c..b6f59fd28d50 100644 --- a/tests/e2e-backends/backend_test.go +++ b/tests/e2e-backends/backend_test.go @@ -29,18 +29,30 @@ import ( // // BACKEND_TEST_MODEL_URL HTTP(S) URL of a model file to download before the test. // BACKEND_TEST_MODEL_FILE Path to an already-available model file (skips download). +// BACKEND_TEST_MODEL_NAME HuggingFace model id (e.g. "Qwen/Qwen2.5-0.5B-Instruct"). +// Passed verbatim as ModelOptions.Model; backends like vllm +// resolve it themselves and no local file is downloaded. // // Optional: // // BACKEND_TEST_CAPS Comma-separated list of capabilities to exercise. -// Supported values: health, load, predict, stream, embeddings. +// Supported values: health, load, predict, stream, +// embeddings, tools. // Defaults to "health,load,predict,stream". // A backend that only does embeddings would set this to // "health,load,embeddings"; an image/TTS backend that cannot // be driven by a text prompt can set it to "health,load". +// "tools" asks the backend to extract a tool call from the +// model output into ChatDelta.tool_calls. // BACKEND_TEST_PROMPT Override the prompt used by predict/stream specs. // BACKEND_TEST_CTX_SIZE Override the context size passed to LoadModel (default 512). // BACKEND_TEST_THREADS Override Threads passed to LoadModel (default 4). +// BACKEND_TEST_OPTIONS Comma-separated Options[] entries passed to LoadModel, +// e.g. "tool_parser:hermes,reasoning_parser:qwen3". +// BACKEND_TEST_TOOL_PROMPT Override the user prompt for the tools spec +// (default: "What's the weather like in Paris, France?"). +// BACKEND_TEST_TOOL_NAME Override the function name expected in the tool call +// (default: "get_weather"). // // The suite is intentionally model-format-agnostic: it only ever passes the // file path to LoadModel, so GGUF, ONNX, safetensors, .bin etc. all work so @@ -51,9 +63,12 @@ const ( capPredict = "predict" capStream = "stream" capEmbeddings = "embeddings" + capTools = "tools" - defaultPrompt = "The capital of France is" - streamPrompt = "Once upon a time" + defaultPrompt = "The capital of France is" + streamPrompt = "Once upon a time" + defaultToolPrompt = "What's the weather like in Paris, France?" 
+ defaultToolName = "get_weather" ) func defaultCaps() map[string]bool { @@ -87,12 +102,14 @@ var _ = Describe("Backend container", Ordered, func() { caps map[string]bool workDir string binaryDir string - modelFile string + modelFile string // set when a local file is used + modelName string // set when a HuggingFace model id is used addr string serverCmd *exec.Cmd conn *grpc.ClientConn client pb.BackendClient prompt string + options []string ) BeforeAll(func() { @@ -101,8 +118,9 @@ var _ = Describe("Backend container", Ordered, func() { modelURL := os.Getenv("BACKEND_TEST_MODEL_URL") modelFile = os.Getenv("BACKEND_TEST_MODEL_FILE") - Expect(modelURL != "" || modelFile != "").To(BeTrue(), - "one of BACKEND_TEST_MODEL_URL or BACKEND_TEST_MODEL_FILE must be set") + modelName = os.Getenv("BACKEND_TEST_MODEL_NAME") + Expect(modelURL != "" || modelFile != "" || modelName != "").To(BeTrue(), + "one of BACKEND_TEST_MODEL_URL, BACKEND_TEST_MODEL_FILE, or BACKEND_TEST_MODEL_NAME must be set") caps = parseCaps() GinkgoWriter.Printf("Testing image=%q with capabilities=%v\n", image, keys(caps)) @@ -112,6 +130,15 @@ var _ = Describe("Backend container", Ordered, func() { prompt = defaultPrompt } + if raw := strings.TrimSpace(os.Getenv("BACKEND_TEST_OPTIONS")); raw != "" { + for _, opt := range strings.Split(raw, ",") { + opt = strings.TrimSpace(opt) + if opt != "" { + options = append(options, opt) + } + } + } + var err error workDir, err = os.MkdirTemp("", "backend-e2e-*") Expect(err).NotTo(HaveOccurred()) @@ -122,8 +149,8 @@ var _ = Describe("Backend container", Ordered, func() { extractImage(image, binaryDir) Expect(filepath.Join(binaryDir, "run.sh")).To(BeAnExistingFile()) - // Download the model once if not provided. - if modelFile == "" { + // Download the model once if not provided and no HF name given. + if modelFile == "" && modelName == "" { modelFile = filepath.Join(workDir, "model.bin") downloadFile(modelURL, modelFile) } @@ -196,16 +223,27 @@ var _ = Describe("Backend container", Ordered, func() { ctxSize := envInt32("BACKEND_TEST_CTX_SIZE", 512) threads := envInt32("BACKEND_TEST_THREADS", 4) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + // Prefer a HuggingFace model id when provided (e.g. for vllm); + // otherwise fall back to a downloaded/local file path. 
+ modelRef := modelFile + var modelPath string + if modelName != "" { + modelRef = modelName + } else { + modelPath = modelFile + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) defer cancel() res, err := client.LoadModel(ctx, &pb.ModelOptions{ - Model: modelFile, - ModelFile: modelFile, + Model: modelRef, + ModelFile: modelPath, ContextSize: ctxSize, Threads: threads, NGPULayers: 0, MMap: true, NBatch: 128, + Options: options, }) Expect(err).NotTo(HaveOccurred()) Expect(res.GetSuccess()).To(BeTrue(), "LoadModel failed: %s", res.GetMessage()) @@ -275,6 +313,78 @@ var _ = Describe("Backend container", Ordered, func() { Expect(res.GetEmbeddings()).NotTo(BeEmpty(), "Embedding returned empty vector") GinkgoWriter.Printf("Embedding: %d dims\n", len(res.GetEmbeddings())) }) + + It("extracts tool calls into ChatDelta", func() { + if !caps[capTools] { + Skip("tools capability not enabled") + } + + toolPrompt := os.Getenv("BACKEND_TEST_TOOL_PROMPT") + if toolPrompt == "" { + toolPrompt = defaultToolPrompt + } + toolName := os.Getenv("BACKEND_TEST_TOOL_NAME") + if toolName == "" { + toolName = defaultToolName + } + + toolsJSON := fmt.Sprintf(`[{ + "type": "function", + "function": { + "name": %q, + "description": "Get the current weather for a location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + } + }, + "required": ["location"] + } + } + }]`, toolName) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + res, err := client.Predict(ctx, &pb.PredictOptions{ + Messages: []*pb.Message{ + {Role: "system", Content: "You are a helpful assistant. Use the provided tool when the user asks about weather."}, + {Role: "user", Content: toolPrompt}, + }, + Tools: toolsJSON, + ToolChoice: "auto", + UseTokenizerTemplate: true, + Tokens: 200, + Temperature: 0.1, + }) + Expect(err).NotTo(HaveOccurred()) + + // Collect tool calls from every delta — some backends emit a single + // final delta, others stream incremental pieces in one Reply. + var toolCalls []*pb.ToolCallDelta + for _, delta := range res.GetChatDeltas() { + toolCalls = append(toolCalls, delta.GetToolCalls()...) + } + + GinkgoWriter.Printf("Tool call: raw=%q deltas=%d tool_calls=%d\n", + string(res.GetMessage()), len(res.GetChatDeltas()), len(toolCalls)) + + Expect(toolCalls).NotTo(BeEmpty(), + "Predict did not return any ToolCallDelta. raw=%q", string(res.GetMessage())) + + matched := false + for _, tc := range toolCalls { + GinkgoWriter.Printf(" - idx=%d id=%q name=%q args=%q\n", + tc.GetIndex(), tc.GetId(), tc.GetName(), tc.GetArguments()) + if tc.GetName() == toolName { + matched = true + } + } + Expect(matched).To(BeTrue(), + "Expected a tool call named %q in ChatDelta.tool_calls", toolName) + }) }) // extractImage runs `docker create` + `docker export` to materialise the image From c7f444d18bc3a5c201cf8d467e5d78a000f2477b Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 14:53:44 +0000 Subject: [PATCH 10/16] ci(test-extra): run vllm e2e tests on CPU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds tests-vllm-grpc to the test-extra workflow, mirroring the llama-cpp and ik-llama-cpp gRPC jobs. 
Triggers when files under backend/python/vllm/ change (or on run-all), builds the local-ai vllm container image, and runs the tests/e2e-backends harness with BACKEND_TEST_MODEL_NAME=Qwen/Qwen2.5-0.5B-Instruct, tool_parser:hermes, and the tools capability enabled. Uses ubuntu-latest (no GPU) — vllm runs on CPU via the cpu-vllm wheel we pinned in requirements-cpu-after.txt. Frees disk space before the build since the docker image + torch + vllm wheel is sizeable. --- .github/workflows/test-extra.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml index 6b590d15621f..b6c72b1a7848 100644 --- a/.github/workflows/test-extra.yml +++ b/.github/workflows/test-extra.yml @@ -31,6 +31,7 @@ jobs: llama-cpp-quantization: ${{ steps.detect.outputs.llama-cpp-quantization }} llama-cpp: ${{ steps.detect.outputs.llama-cpp }} ik-llama-cpp: ${{ steps.detect.outputs.ik-llama-cpp }} + vllm: ${{ steps.detect.outputs.vllm }} acestep-cpp: ${{ steps.detect.outputs.acestep-cpp }} qwen3-tts-cpp: ${{ steps.detect.outputs.qwen3-tts-cpp }} voxtral: ${{ steps.detect.outputs.voxtral }} @@ -501,6 +502,27 @@ jobs: - name: Build ik-llama-cpp backend image and run gRPC e2e tests run: | make test-extra-backend-ik-llama-cpp + tests-vllm-grpc: + needs: detect-changes + if: needs.detect-changes.outputs.vllm == 'true' || needs.detect-changes.outputs.run-all == 'true' + runs-on: ubuntu-latest + timeout-minutes: 120 + steps: + - name: Clone + uses: actions/checkout@v6 + with: + submodules: true + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '1.25.4' + - name: Free disk space + run: | + sudo rm -rf /usr/share/dotnet /opt/ghc /usr/local/lib/android /opt/hostedtoolcache/CodeQL || true + df -h + - name: Build vllm (cpu) backend image and run gRPC e2e tests + run: | + make test-extra-backend-vllm tests-acestep-cpp: needs: detect-changes if: needs.detect-changes.outputs.acestep-cpp == 'true' || needs.detect-changes.outputs.run-all == 'true' From 329df11989dba7a167041ac851c83e38fd06af78 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 15:14:42 +0000 Subject: [PATCH 11/16] fix(vllm): build from source on CI to avoid SIGILL on prebuilt wheel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The prebuilt vllm 0.14.1+cpu wheel from GitHub releases is compiled with SIMD instructions (AVX-512 VNNI/BF16 or AMX-BF16) that not every CPU supports. GitHub Actions ubuntu-latest runners SIGILL when vllm spawns the model_executor.models.registry subprocess for introspection, so LoadModel never reaches the actual inference path. - install.sh: when FROM_SOURCE=true on a CPU build, temporarily hide requirements-cpu-after.txt so installRequirements installs the base deps + torch CPU without pulling the prebuilt wheel, then clone vllm and compile it with VLLM_TARGET_DEVICE=cpu. The resulting binaries target the host's actual CPU. - backend/Dockerfile.python: accept a FROM_SOURCE build-arg and expose it as an ENV so install.sh sees it during `make`. - Makefile docker-build-backend: forward FROM_SOURCE as --build-arg when set, so backends that need source builds can opt in. - Makefile test-extra-backend-vllm: call docker-build-vllm via a recursive $(MAKE) invocation so FROM_SOURCE flows through. - .github/workflows/test-extra.yml: set FROM_SOURCE=true on the tests-vllm-grpc job. Slower but reliable — the prebuilt wheel only works on hosts that share the build-time SIMD baseline. 
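As a rough illustration, one way to check whether a host meets that
baseline before opting in (flag names as the Linux kernel reports them
in /proc/cpuinfo; the exact set the wheel requires is an assumption
here):

    # Prints the matching flags, or nothing on hosts that would SIGILL.
    grep -o -m1 -E 'avx512_vnni|avx512_bf16|amx_bf16' /proc/cpuinfo
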
Answers 'did you test locally?': yes, end-to-end on my local machine with the prebuilt wheel (CPU supports AVX-512 VNNI). The CI runner CPU gap was not covered locally — this commit plugs that gap. --- .github/workflows/test-extra.yml | 6 ++++ Makefile | 7 ++++- backend/Dockerfile.python | 5 ++++ backend/python/vllm/install.sh | 47 ++++++++++++++++++++++---------- 4 files changed, 49 insertions(+), 16 deletions(-) diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml index b6c72b1a7848..a9f10e3fc98e 100644 --- a/.github/workflows/test-extra.yml +++ b/.github/workflows/test-extra.yml @@ -521,6 +521,12 @@ jobs: sudo rm -rf /usr/share/dotnet /opt/ghc /usr/local/lib/android /opt/hostedtoolcache/CodeQL || true df -h - name: Build vllm (cpu) backend image and run gRPC e2e tests + env: + # GitHub Actions runners don't all support the SIMD instructions + # the prebuilt vllm CPU wheel was compiled against (SIGILL in + # vllm.model_executor.models.registry on import). Build vllm from + # source so it targets the actual CI CPU. + FROM_SOURCE: "true" run: | make test-extra-backend-vllm tests-acestep-cpp: diff --git a/Makefile b/Makefile index 7f61666f58ea..4464a977485d 100644 --- a/Makefile +++ b/Makefile @@ -509,7 +509,11 @@ test-extra-backend-ik-llama-cpp: docker-build-ik-llama-cpp ## vllm is resolved from a HuggingFace model id (no file download) and ## exercises Predict + streaming + tool-call extraction via the hermes parser. -test-extra-backend-vllm: docker-build-vllm +## FROM_SOURCE=true passes through to Dockerfile.python → install.sh and +## compiles vllm locally instead of using the prebuilt CPU wheel — required +## on runners whose CPU doesn't support the wheel's baked-in SIMD. +test-extra-backend-vllm: + $(MAKE) docker-build-vllm BACKEND_IMAGE=local-ai-backend:vllm \ BACKEND_TEST_MODEL_NAME=Qwen/Qwen2.5-0.5B-Instruct \ BACKEND_TEST_CAPS=health,load,predict,stream,tools \ @@ -669,6 +673,7 @@ define docker-build-backend --build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \ --build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \ --build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \ + $(if $(FROM_SOURCE),--build-arg FROM_SOURCE=$(FROM_SOURCE)) \ $(if $(filter true,$(5)),--build-arg BACKEND=$(1)) \ -t local-ai-backend:$(1) -f backend/Dockerfile.$(2) $(3) endef diff --git a/backend/Dockerfile.python b/backend/Dockerfile.python index 5d2e6171eb62..e209815db011 100644 --- a/backend/Dockerfile.python +++ b/backend/Dockerfile.python @@ -195,6 +195,11 @@ COPY backend/backend.proto /${BACKEND}/backend.proto COPY backend/python/common/ /${BACKEND}/common COPY scripts/build/package-gpu-libs.sh /package-gpu-libs.sh +# Optional per-backend source build toggle (e.g. vllm on CPU needs to +# compile against the host SIMD instead of using the prebuilt wheel). +ARG FROM_SOURCE="" +ENV FROM_SOURCE=${FROM_SOURCE} + RUN cd /${BACKEND} && PORTABLE_PYTHON=true make # Package GPU libraries into the backend's lib directory diff --git a/backend/python/vllm/install.sh b/backend/python/vllm/install.sh index 66a809a92650..de204e0a2d5d 100755 --- a/backend/python/vllm/install.sh +++ b/backend/python/vllm/install.sh @@ -32,20 +32,37 @@ if [ "x${BUILD_PROFILE}" == "xcpu" ]; then EXTRA_PIP_INSTALL_FLAGS+=" --index-strategy=unsafe-best-match" fi -# We don't embed this into the images as it is a large dependency and not always needed. -# Besides, the speed inference are not actually usable in the current state for production use-cases. 
+# When FROM_SOURCE=true on a CPU build, skip the prebuilt wheel in +# requirements-cpu-after.txt and compile vllm locally against the host's +# actual CPU. The prebuilt CPU wheels from vllm releases are compiled with +# wider SIMD (AVX-512 VNNI/BF16 etc.) than some environments support — in +# particular GitHub Actions runners SIGILL on the vllm model registry +# subprocess. FROM_SOURCE=true avoids that at the cost of a longer install. if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE:-}" == "xtrue" ]; then - ensureVenv - # https://docs.vllm.ai/en/v0.6.1/getting_started/cpu-installation.html - if [ ! -d vllm ]; then - git clone https://github.com/vllm-project/vllm - fi - pushd vllm - uv pip install wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm grpcio==1.68.1 protobuf bitsandbytes - uv pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu - VLLM_TARGET_DEVICE=cpu python setup.py install - popd - rm -rf vllm - else - installRequirements + # Temporarily hide the prebuilt wheel so installRequirements doesn't + # pull it — the rest of the requirements files (base deps, torch, + # transformers) are still installed normally. + _cpu_after="${backend_dir}/requirements-cpu-after.txt" + _cpu_after_bak="" + if [ -f "${_cpu_after}" ]; then + _cpu_after_bak="${_cpu_after}.from-source.bak" + mv "${_cpu_after}" "${_cpu_after_bak}" + fi + installRequirements + if [ -n "${_cpu_after_bak}" ]; then + mv "${_cpu_after_bak}" "${_cpu_after}" + fi + + # Build vllm from source against the installed torch. + # https://docs.vllm.ai/en/latest/getting_started/installation/cpu/ + _vllm_src=$(mktemp -d) + trap 'rm -rf "${_vllm_src}"' EXIT + git clone --depth 1 https://github.com/vllm-project/vllm "${_vllm_src}/vllm" + pushd "${_vllm_src}/vllm" + uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm + # Respect pre-installed torch version — skip vllm's own requirements-build.txt torch pin. + VLLM_TARGET_DEVICE=cpu uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --no-deps . + popd +else + installRequirements fi From ea2bbabffd4a037cb1851a2be56dae577f058069 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 16:02:49 +0000 Subject: [PATCH 12/16] ci(vllm): use bigger-runner instead of source build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The prebuilt vllm 0.14.1+cpu wheel requires SIMD instructions (AVX-512 VNNI/BF16) that stock ubuntu-latest GitHub runners don't support — vllm.model_executor.models.registry SIGILLs on import during LoadModel. Source compilation works but takes 30-40 minutes per CI run, which is too slow for an e2e smoke test. Instead, switch tests-vllm-grpc to the bigger-runner self-hosted label (already used by backend.yml for the llama-cpp CUDA build) — that hardware has the required SIMD baseline and the prebuilt wheel runs cleanly. FROM_SOURCE=true is kept as an opt-in escape hatch: - install.sh still has the CPU source-build path for hosts that need it - backend/Dockerfile.python still declares the ARG + ENV - Makefile docker-build-backend still forwards the build-arg when set Default CI path uses the fast prebuilt wheel; source build can be re-enabled by exporting FROM_SOURCE=true in the environment. 
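For a quick compatibility probe on a candidate runner, importing the
module named above is enough to reproduce the failure (a minimal
sketch, assuming the prebuilt wheel is installed in the active
environment):

    # SIGILLs on CPUs below the wheel's SIMD baseline, prints OK otherwise.
    python -c "import vllm.model_executor.models.registry" \
      && echo "prebuilt vllm CPU wheel OK on this host"
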
--- .github/workflows/test-extra.yml | 14 ++++++-------- Makefile | 9 ++++----- backend/Dockerfile.python | 5 +++-- backend/python/vllm/install.sh | 12 ++++++------ 4 files changed, 19 insertions(+), 21 deletions(-) diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml index a9f10e3fc98e..592caffc7b11 100644 --- a/.github/workflows/test-extra.yml +++ b/.github/workflows/test-extra.yml @@ -505,8 +505,12 @@ jobs: tests-vllm-grpc: needs: detect-changes if: needs.detect-changes.outputs.vllm == 'true' || needs.detect-changes.outputs.run-all == 'true' - runs-on: ubuntu-latest - timeout-minutes: 120 + # The prebuilt vllm CPU wheel is compiled with AVX-512 VNNI/BF16 + # instructions; stock ubuntu-latest runners SIGILL on import of + # vllm.model_executor.models.registry. bigger-runner has newer + # hardware that supports the required SIMD. + runs-on: bigger-runner + timeout-minutes: 90 steps: - name: Clone uses: actions/checkout@v6 @@ -521,12 +525,6 @@ jobs: sudo rm -rf /usr/share/dotnet /opt/ghc /usr/local/lib/android /opt/hostedtoolcache/CodeQL || true df -h - name: Build vllm (cpu) backend image and run gRPC e2e tests - env: - # GitHub Actions runners don't all support the SIMD instructions - # the prebuilt vllm CPU wheel was compiled against (SIGILL in - # vllm.model_executor.models.registry on import). Build vllm from - # source so it targets the actual CI CPU. - FROM_SOURCE: "true" run: | make test-extra-backend-vllm tests-acestep-cpp: diff --git a/Makefile b/Makefile index 4464a977485d..7e2e35052482 100644 --- a/Makefile +++ b/Makefile @@ -509,11 +509,10 @@ test-extra-backend-ik-llama-cpp: docker-build-ik-llama-cpp ## vllm is resolved from a HuggingFace model id (no file download) and ## exercises Predict + streaming + tool-call extraction via the hermes parser. -## FROM_SOURCE=true passes through to Dockerfile.python → install.sh and -## compiles vllm locally instead of using the prebuilt CPU wheel — required -## on runners whose CPU doesn't support the wheel's baked-in SIMD. -test-extra-backend-vllm: - $(MAKE) docker-build-vllm +## Requires a host CPU with the SIMD instructions the prebuilt vllm CPU +## wheel was compiled against (AVX-512 VNNI/BF16); older CPUs will SIGILL +## on import — on CI this means using the bigger-runner label. +test-extra-backend-vllm: docker-build-vllm BACKEND_IMAGE=local-ai-backend:vllm \ BACKEND_TEST_MODEL_NAME=Qwen/Qwen2.5-0.5B-Instruct \ BACKEND_TEST_CAPS=health,load,predict,stream,tools \ diff --git a/backend/Dockerfile.python b/backend/Dockerfile.python index e209815db011..16159c67bbbd 100644 --- a/backend/Dockerfile.python +++ b/backend/Dockerfile.python @@ -195,8 +195,9 @@ COPY backend/backend.proto /${BACKEND}/backend.proto COPY backend/python/common/ /${BACKEND}/common COPY scripts/build/package-gpu-libs.sh /package-gpu-libs.sh -# Optional per-backend source build toggle (e.g. vllm on CPU needs to -# compile against the host SIMD instead of using the prebuilt wheel). +# Optional per-backend source build toggle (e.g. vllm on CPU can set +# FROM_SOURCE=true to compile against the build host SIMD instead of +# pulling a prebuilt wheel). Default empty — most backends ignore it. 
ARG FROM_SOURCE="" ENV FROM_SOURCE=${FROM_SOURCE} diff --git a/backend/python/vllm/install.sh b/backend/python/vllm/install.sh index de204e0a2d5d..cf6fa7efe1c3 100755 --- a/backend/python/vllm/install.sh +++ b/backend/python/vllm/install.sh @@ -32,12 +32,12 @@ if [ "x${BUILD_PROFILE}" == "xcpu" ]; then EXTRA_PIP_INSTALL_FLAGS+=" --index-strategy=unsafe-best-match" fi -# When FROM_SOURCE=true on a CPU build, skip the prebuilt wheel in -# requirements-cpu-after.txt and compile vllm locally against the host's -# actual CPU. The prebuilt CPU wheels from vllm releases are compiled with -# wider SIMD (AVX-512 VNNI/BF16 etc.) than some environments support — in -# particular GitHub Actions runners SIGILL on the vllm model registry -# subprocess. FROM_SOURCE=true avoids that at the cost of a longer install. +# FROM_SOURCE=true on a CPU build skips the prebuilt vllm wheel in +# requirements-cpu-after.txt and compiles vllm locally against the host's +# actual CPU. Not used by default because it takes ~30-40 minutes, but +# kept here for hosts where the prebuilt wheel SIGILLs (CPU without the +# required SIMD baseline, e.g. AVX-512 VNNI/BF16). Default CI uses a +# bigger-runner with compatible hardware instead. if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE:-}" == "xtrue" ]; then # Temporarily hide the prebuilt wheel so installRequirements doesn't # pull it — the rest of the requirements files (base deps, torch, From c4dc495ea18c4bd0636cb69b209dc43e7322e99d Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 20:08:09 +0000 Subject: [PATCH 13/16] ci(vllm): install make + build deps on bigger-runner MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit bigger-runner is a bare self-hosted runner used by backend.yml for docker image builds — it has docker but not the usual ubuntu-latest toolchain. The make-based test target needs make, build-essential (cgo in 'go test'), and curl/unzip (the Makefile protoc target downloads protoc from github releases). protoc-gen-go and protoc-gen-go-grpc come via 'go install' in the install-go-tools target, which setup-go makes possible. --- .github/workflows/test-extra.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml index 592caffc7b11..5c2b270217fa 100644 --- a/.github/workflows/test-extra.yml +++ b/.github/workflows/test-extra.yml @@ -516,6 +516,14 @@ jobs: uses: actions/checkout@v6 with: submodules: true + - name: Dependencies + # bigger-runner is a bare self-hosted runner — install the tools + # we need for docker-build + protogen-go + go test (make, curl, + # unzip for the protoc download, build-essential for cgo). + run: | + sudo apt-get update + sudo apt-get install -y --no-install-recommends \ + make build-essential curl unzip ca-certificates git tar - name: Setup Go uses: actions/setup-go@v5 with: From 017bdee4e44acab75f16b23ce7dd9e9fc007812f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 20:18:13 +0000 Subject: [PATCH 14/16] ci(vllm): install libnuma1 + libgomp1 on bigger-runner The vllm 0.14.1+cpu wheel ships a _C C++ extension that dlopens libnuma.so.1 at import time. When the runner host doesn't have it, the extension silently fails to register its torch ops, so EngineCore crashes on init_device with: AttributeError: '_OpNamespace' '_C_utils' object has no attribute 'init_cpu_threads_env' Also add libgomp1 (OpenMP runtime, used by torch CPU kernels) to be safe on stripped-down runners. 
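A one-liner to verify the fix on a runner (a sketch using only the
Python standard library; the soname is the one from the error above):

    # Mirrors the dlopen that vllm's _C extension performs at import time;
    # raises OSError when libnuma.so.1 is not installed on the host.
    python3 -c 'import ctypes; ctypes.CDLL("libnuma.so.1"); print("libnuma OK")'
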
--- .github/workflows/test-extra.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml index 5c2b270217fa..ad26cfcc7f72 100644 --- a/.github/workflows/test-extra.yml +++ b/.github/workflows/test-extra.yml @@ -519,11 +519,15 @@ jobs: - name: Dependencies # bigger-runner is a bare self-hosted runner — install the tools # we need for docker-build + protogen-go + go test (make, curl, - # unzip for the protoc download, build-essential for cgo). + # unzip for the protoc download, build-essential for cgo), plus + # libnuma1 which the vllm CPU wheel's _C extension dlopens at + # runtime (libnuma.so.1 missing → init_cpu_threads_env op is not + # registered → AttributeError on LoadModel). run: | sudo apt-get update sudo apt-get install -y --no-install-recommends \ - make build-essential curl unzip ca-certificates git tar + make build-essential curl unzip ca-certificates git tar \ + libnuma1 libgomp1 - name: Setup Go uses: actions/setup-go@v5 with: From d74cd56b144d7215eb6ba6f470d6ce969bf465bd Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 12 Apr 2026 20:20:21 +0000 Subject: [PATCH 15/16] feat(vllm): bundle libnuma/libgomp via package.sh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The vllm CPU wheel ships a _C extension that dlopens libnuma.so.1 at import time; torch's CPU kernels in turn use libgomp.so.1 (OpenMP). Without these on the host, vllm._C silently fails to register its torch ops and EngineCore crashes with: AttributeError: '_OpNamespace' '_C_utils' object has no attribute 'init_cpu_threads_env' Rather than asking every user to install libnuma1/libgomp1 on their host (or every LocalAI base image to ship them), bundle them into the backend image itself — same pattern fish-speech and the GPU libs already use. libbackend.sh adds ${EDIR}/lib to LD_LIBRARY_PATH at run time so the bundled copies are picked up automatically. - backend/python/vllm/package.sh (new): copies libnuma.so.1 and libgomp.so.1 from the builder's multilib paths into ${BACKEND}/lib, preserving soname symlinks. Runs during Dockerfile.python's 'Run backend-specific packaging' step (which already invokes package.sh if present). - backend/Dockerfile.python: install libnuma1 + libgomp1 in the builder stage so package.sh has something to copy (the Ubuntu base image otherwise only has libgomp in the gcc dep chain). - test-extra.yml: drop the workaround that installed these libs on the runner host — with the backend image self-contained, the runner no longer needs them, and the test now exercises the packaging path end-to-end the way a production host would. 
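To sanity-check the result locally, listing the packaged lib directory
after a build should show the sonames with their symlinks intact (a
sketch; the real-file version suffixes depend on the builder's Ubuntu
release and are assumptions here):

    # Run from the repo checkout after package.sh has executed.
    ls -l backend/python/vllm/lib/
    # expected shape:
    #   libgomp.so.1 -> libgomp.so.1.0.0
    #   libnuma.so.1 -> libnuma.so.1.0.0
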
--- .github/workflows/test-extra.yml | 13 +++++---- backend/Dockerfile.python | 1 + backend/python/vllm/package.sh | 49 ++++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 6 deletions(-) create mode 100755 backend/python/vllm/package.sh diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml index ad26cfcc7f72..dc38029b3554 100644 --- a/.github/workflows/test-extra.yml +++ b/.github/workflows/test-extra.yml @@ -519,15 +519,16 @@ jobs: - name: Dependencies # bigger-runner is a bare self-hosted runner — install the tools # we need for docker-build + protogen-go + go test (make, curl, - # unzip for the protoc download, build-essential for cgo), plus - # libnuma1 which the vllm CPU wheel's _C extension dlopens at - # runtime (libnuma.so.1 missing → init_cpu_threads_env op is not - # registered → AttributeError on LoadModel). + # unzip for the protoc download, build-essential for cgo). + # Runtime shared libraries the vllm backend needs (libnuma, + # libgomp) are packaged into the backend image via package.sh + # and NOT installed on the host — that way the CI exercises the + # packaging path end-to-end and catches missing libs that users + # would otherwise hit on a bare production host. run: | sudo apt-get update sudo apt-get install -y --no-install-recommends \ - make build-essential curl unzip ca-certificates git tar \ - libnuma1 libgomp1 + make build-essential curl unzip ca-certificates git tar - name: Setup Go uses: actions/setup-go@v5 with: diff --git a/backend/Dockerfile.python b/backend/Dockerfile.python index 16159c67bbbd..f3bcf8d34710 100644 --- a/backend/Dockerfile.python +++ b/backend/Dockerfile.python @@ -29,6 +29,7 @@ RUN apt-get update && \ curl python3-pip \ python-is-python3 \ python3-dev llvm \ + libnuma1 libgomp1 \ python3-venv make cmake && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* diff --git a/backend/python/vllm/package.sh b/backend/python/vllm/package.sh new file mode 100755 index 000000000000..3c4ba8c198b6 --- /dev/null +++ b/backend/python/vllm/package.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# Script to package runtime shared libraries for the vllm backend. +# +# The final Dockerfile.python stage is FROM scratch, so system libraries +# must be explicitly copied into ${BACKEND}/lib so the backend can run on +# any host without installing them. libbackend.sh automatically adds that +# directory to LD_LIBRARY_PATH at run time. +# +# vllm's CPU C++ extension (vllm._C) dlopens libnuma.so.1 at import time; +# if it's missing, the _C_utils torch ops are never registered and the +# engine crashes with AttributeError on init_cpu_threads_env. libgomp is +# used by torch's CPU kernels; on some stripped-down hosts it's also +# absent, so we bundle it too. + +set -e + +CURDIR=$(dirname "$(realpath "$0")") +LIB_DIR="${CURDIR}/lib" +mkdir -p "${LIB_DIR}" + +copy_with_symlinks() { + local soname="$1" + local hit="" + for dir in /usr/lib/x86_64-linux-gnu /usr/lib/aarch64-linux-gnu /lib/x86_64-linux-gnu /lib/aarch64-linux-gnu /usr/lib /lib; do + if [ -e "${dir}/${soname}" ]; then + hit="${dir}/${soname}" + break + fi + done + if [ -z "${hit}" ]; then + echo "warning: ${soname} not found in standard lib paths" >&2 + return 0 + fi + # Follow the symlink to the real file, copy it, then recreate the symlink. 
+ local real + real=$(readlink -f "${hit}") + cp -v "${real}" "${LIB_DIR}/" + local real_base + real_base=$(basename "${real}") + if [ "${real_base}" != "${soname}" ]; then + ln -sf "${real_base}" "${LIB_DIR}/${soname}" + fi +} + +copy_with_symlinks libnuma.so.1 +copy_with_symlinks libgomp.so.1 + +echo "vllm packaging completed successfully" +ls -liah "${LIB_DIR}/" From cd56a05c3e387513262f7b1297b208a7c7f1523e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 13 Apr 2026 07:46:57 +0000 Subject: [PATCH 16/16] ci(vllm): disable tests-vllm-grpc job (heterogeneous runners) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both ubuntu-latest and bigger-runner have inconsistent CPU baselines: some instances support the AVX-512 VNNI/BF16 instructions the prebuilt vllm 0.14.1+cpu wheel was compiled with, others SIGILL on import of vllm.model_executor.models.registry. The libnuma packaging fix doesn't help when the wheel itself can't be loaded. FROM_SOURCE=true compiles vllm against the actual host CPU and works everywhere, but takes 30-50 minutes per run — too slow for a smoke test on every PR. Comment out the job for now. The test itself is intact and passes locally; run it via 'make test-extra-backend-vllm' on a host with the required SIMD baseline. Re-enable when: - we have a self-hosted runner label with guaranteed AVX-512 VNNI/BF16, or - vllm publishes a CPU wheel with a wider baseline, or - we set up a docker layer cache that makes FROM_SOURCE acceptable The detect-changes vllm output, the test harness changes (tests/ e2e-backends + tools cap), the make target (test-extra-backend-vllm), the package.sh and the Dockerfile/install.sh plumbing all stay in place. --- .github/workflows/test-extra.yml | 84 +++++++++++++++++--------------- 1 file changed, 46 insertions(+), 38 deletions(-) diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml index dc38029b3554..afeebea82dc2 100644 --- a/.github/workflows/test-extra.yml +++ b/.github/workflows/test-extra.yml @@ -502,44 +502,52 @@ jobs: - name: Build ik-llama-cpp backend image and run gRPC e2e tests run: | make test-extra-backend-ik-llama-cpp - tests-vllm-grpc: - needs: detect-changes - if: needs.detect-changes.outputs.vllm == 'true' || needs.detect-changes.outputs.run-all == 'true' - # The prebuilt vllm CPU wheel is compiled with AVX-512 VNNI/BF16 - # instructions; stock ubuntu-latest runners SIGILL on import of - # vllm.model_executor.models.registry. bigger-runner has newer - # hardware that supports the required SIMD. - runs-on: bigger-runner - timeout-minutes: 90 - steps: - - name: Clone - uses: actions/checkout@v6 - with: - submodules: true - - name: Dependencies - # bigger-runner is a bare self-hosted runner — install the tools - # we need for docker-build + protogen-go + go test (make, curl, - # unzip for the protoc download, build-essential for cgo). - # Runtime shared libraries the vllm backend needs (libnuma, - # libgomp) are packaged into the backend image via package.sh - # and NOT installed on the host — that way the CI exercises the - # packaging path end-to-end and catches missing libs that users - # would otherwise hit on a bare production host. 
- run: | - sudo apt-get update - sudo apt-get install -y --no-install-recommends \ - make build-essential curl unzip ca-certificates git tar - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: '1.25.4' - - name: Free disk space - run: | - sudo rm -rf /usr/share/dotnet /opt/ghc /usr/local/lib/android /opt/hostedtoolcache/CodeQL || true - df -h - - name: Build vllm (cpu) backend image and run gRPC e2e tests - run: | - make test-extra-backend-vllm + # tests-vllm-grpc is currently disabled in CI. + # + # The prebuilt vllm CPU wheel is compiled with AVX-512 VNNI/BF16 + # instructions, and neither ubuntu-latest nor the bigger-runner pool + # offers a stable CPU baseline that supports them — runners come + # back with different hardware between runs and SIGILL on import of + # vllm.model_executor.models.registry. Compiling vllm from source + # via FROM_SOURCE=true works on any CPU but takes 30-50 minutes per + # run, which is too slow for a smoke test. + # + # The test itself (tests/e2e-backends + make test-extra-backend-vllm) + # is fully working and validated locally on a host with the right + # SIMD baseline. Run it manually with: + # + # make test-extra-backend-vllm + # + # Re-enable this job once we have a self-hosted runner label with + # guaranteed AVX-512 VNNI/BF16 support, or once the vllm project + # publishes a CPU wheel with a wider baseline. + # + # tests-vllm-grpc: + # needs: detect-changes + # if: needs.detect-changes.outputs.vllm == 'true' || needs.detect-changes.outputs.run-all == 'true' + # runs-on: bigger-runner + # timeout-minutes: 90 + # steps: + # - name: Clone + # uses: actions/checkout@v6 + # with: + # submodules: true + # - name: Dependencies + # run: | + # sudo apt-get update + # sudo apt-get install -y --no-install-recommends \ + # make build-essential curl unzip ca-certificates git tar + # - name: Setup Go + # uses: actions/setup-go@v5 + # with: + # go-version: '1.25.4' + # - name: Free disk space + # run: | + # sudo rm -rf /usr/share/dotnet /opt/ghc /usr/local/lib/android /opt/hostedtoolcache/CodeQL || true + # df -h + # - name: Build vllm (cpu) backend image and run gRPC e2e tests + # run: | + # make test-extra-backend-vllm tests-acestep-cpp: needs: detect-changes if: needs.detect-changes.outputs.acestep-cpp == 'true' || needs.detect-changes.outputs.run-all == 'true'