
JSON Mode + Streaming + OpenAI API + Llama3 = never sends STOP, and a lot of whitespace after the JSON #4446

Open
odrobnik opened this issue May 15, 2024 · 2 comments
Labels
bug Something isn't working

Comments

@odrobnik

What is the issue?

Without JSON Mode, the last few lines of the stream of chunk objects are:

...
data: {"id":"chatcmpl-273","object":"chat.completion.chunk","created":1715761661,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" adventures"},"finish_reason":null}]}
data: {"id":"chatcmpl-273","object":"chat.completion.chunk","created":1715761661,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":".\"\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-273","object":"chat.completion.chunk","created":1715761661,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"}\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-273","object":"chat.completion.chunk","created":1715761661,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"```"},"finish_reason":null}]}
data: {"id":"chatcmpl-273","object":"chat.completion.chunk","created":1715761661,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":"stop"}]}
data: [DONE]

If I enable JSON Mode, the last few lines of the stream look like this:

...
{"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761446,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" adventures"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761446,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":".\""},"finish_reason":null}]}

// here the JSON is over, what follows is only junk whitespace

data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761446,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"}\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761446,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" \n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n\n\n\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"  "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n\n\n\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" \n\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761447,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"  "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n\n\n\n\n\n\n\n\n\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"  "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" \n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"     "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761448,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n \n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761449,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"      "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761449,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n        \n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761449,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"   "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761449,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":"\n\n\n"},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761449,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" "},"finish_reason":null}]}
data: {"id":"chatcmpl-626","object":"chat.completion.chunk","created":1715761449,"model":"llama3","system_fingerprint":"fp_ollama","choices":[{"index":0,"delta":{"role":"assistant","content":" "},"finish_reason":null}]}

JSON Mode appears to be engaged, because the streamed output correctly begins with {. But the server fails to detect that the JSON is complete: it goes into a whitespace-junk output mode, eventually hits some internal limit and stops generating, yet never sends finish_reason: "stop".

So there are three bugs:

  1. JSON Mode should not produce extra whitespace after the JSON.
  2. JSON Mode should send finish_reason: "stop" when the JSON is done.
  3. When generation goes haywire and is aborted, a finish_reason should still be sent and the stream should be ended; currently the client just hangs waiting for further data. (A client-side workaround is sketched below.)
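
Until this is fixed server-side, a client can protect itself. The sketch below is my own code, not part of Ollama, and the type name jsonEndDetector is made up for illustration: it tracks brace depth across streamed deltas and treats the stream as finished once the top-level JSON value closes, so the trailing whitespace and the missing finish_reason cannot hang the reader.

package main

import "fmt"

// jsonEndDetector is a hypothetical client-side guard. It scans streamed
// delta strings and reports when the top-level JSON value has closed.
// String/escape handling is simplified; it assumes the model emits a
// single JSON object or array, as JSON Mode promises.
type jsonEndDetector struct {
	depth    int  // current nesting depth of {} and []
	inString bool // inside a JSON string literal
	escaped  bool // previous rune was a backslash inside a string
	started  bool // the opening { or [ has been seen
}

// feed consumes one streamed delta and returns true once the top-level
// JSON value is complete; everything after that is junk to be ignored.
func (d *jsonEndDetector) feed(delta string) bool {
	for _, r := range delta {
		switch {
		case d.escaped:
			d.escaped = false
		case d.inString:
			if r == '\\' {
				d.escaped = true
			} else if r == '"' {
				d.inString = false
			}
		case r == '"':
			d.inString = true
		case r == '{' || r == '[':
			d.depth++
			d.started = true
		case r == '}' || r == ']':
			d.depth--
		}
		if d.started && d.depth == 0 {
			return true
		}
	}
	return false
}

func main() {
	// Roughly the tail of the JSON-mode stream from the report above.
	deltas := []string{`{"story": "great`, ` adventures`, `."`, "}\n\n", " \n"}
	var d jsonEndDetector
	for i, delta := range deltas {
		if d.feed(delta) {
			fmt.Printf("JSON complete after delta %d; close the stream\n", i)
			break
		}
	}
}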

OS

macOS

GPU

Apple

CPU

Apple

Ollama version

0.1.32

@odrobnik added the bug label May 15, 2024
@joliss

joliss commented May 16, 2024

The following issues are related to whitespace in JSON: #2577, #2623, PR #3784, PR #3785.

But it seems to me (though I'm unfamiliar with the Ollama codebase) that the junk issue that @odrobnik is seeing, where the model goes haywire, might be worth investigating on its own. Perhaps there's something else going wrong here.

@H0llyW00dzZ

> The following issues are related to whitespace in JSON: #2577, #2623, PR #3784, PR #3785.
>
> But it seems to me (though I'm unfamiliar with the Ollama codebase) that the junk issue that @odrobnik is seeing, where the model goes haywire, might be worth investigating on its own. Perhaps there's something else going wrong here.

These PRs are not the correct solution, as the jsonGrammar is already correct.

Hint: to fix this issue, look at the scanner.Scan() loop in this complex function:

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow maximum 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should response in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}
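
One possible direction, sketched against the code above (my reading of the issue, not a confirmed patch): make the repeat-limit abort emit a terminal CompletionResponse before returning, since ctx.Err() is nil when the context has not been cancelled and the caller otherwise never learns the stream is over. That would mean replacing the repeat-limit block shown above with something like:

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				// Emit a final response so the client receives a done reason
				// and the stream is closed instead of hanging. "stop" is
				// reused here for lack of a dedicated abort reason.
				fn(CompletionResponse{
					Done:       true,
					DoneReason: "stop",
				})
				return nil
			}

That would address bug 3. Bugs 1 and 2 (the whitespace itself, and stopping as soon as the JSON closes) would presumably have to be handled where generation happens, e.g. by ending generation once the grammar's root rule is complete; I haven't verified where that hook would live.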
