diff --git a/common/arg.cpp b/common/arg.cpp
index 4115b2f7511d3..bb005689c1095 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -852,7 +852,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
params.input_prefix = value;
params.enable_chat_template = false;
}
- ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+ ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_INFILL}));
add_opt(common_arg(
{"--in-suffix"}, "STRING",
"string to suffix after user inputs with (default: empty)",
@@ -860,7 +860,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
params.input_suffix = value;
params.enable_chat_template = false;
}
- ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+ ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_INFILL}));
add_opt(common_arg(
{"--no-warmup"},
"skip warming up the model with an empty run",
diff --git a/examples/server/public/index.html b/examples/server/public/index.html
index 6216c08410a28..27570a1eeea67 100644
--- a/examples/server/public/index.html
+++ b/examples/server/public/index.html
@@ -214,6 +214,10 @@
Settings
+
+
+
+
@@ -285,6 +289,8 @@ Settings
// Note: in order not to introduce breaking changes, please keep the same data type (number, string, etc) if you want to change the default value. Do not use null or undefined for default value.
apiKey: '',
systemMessage: 'You are a helpful assistant.',
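+ // prefix/suffix are sent with every request; the server only uses them when no chat template is set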
+ input_prefix: '',
+ input_suffix: '',
// make sure these default values are in sync with `common.h`
samplers: 'dkypmxt',
temperature: 0.8,
@@ -310,6 +316,8 @@ Settings
const CONFIG_INFO = {
apiKey: 'Set the API Key if you are using --api-key option for the server.',
systemMessage: 'The starting message that defines how model should behave.',
+ input_prefix: 'Prefix for user messages in custom chat templates.',
+ input_suffix: 'Suffix for user messages in custom chat templates.',
samplers: 'The order at which samplers are applied, in simplified way. Default is "dkypmxt": dry->top_k->typ_p->top_p->min_p->xtc->temperature',
temperature: 'Controls the randomness of the generated text by affecting the probability distribution of the output tokens. Higher = more random, lower = more focused.',
dynatemp_range: 'Addon for the temperature sampler. The added value to the range of dynamic temperature, which adjusts probabilities by entropy of tokens.',
@@ -559,6 +567,8 @@ Settings
stream: true,
cache_prompt: true,
samplers: this.config.samplers,
+ input_prefix: this.config.input_prefix,
+ input_suffix: this.config.input_suffix,
temperature: this.config.temperature,
dynatemp_range: this.config.dynatemp_range,
dynatemp_exponent: this.config.dynatemp_exponent,
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index b8e003be9730e..5ccc3ae0d2923 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2829,7 +2829,7 @@ int main(int argc, char ** argv) {
return;
}
- json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template);
+ json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template, params.input_prefix, params.input_suffix);
std::vector<server_task> tasks = ctx_server.create_tasks_inference(data, SERVER_TASK_INF_TYPE_COMPLETION);
ctx_server.queue_results.add_waiting_tasks(tasks);
@@ -3218,16 +3218,21 @@ int main(int argc, char ** argv) {
LOG_INF("%s: model loaded\n", __func__);
- // if a custom chat template is not supplied, we will use the one that comes with the model (if any)
+ // if no chat template is specified, check the prefix and suffix to decide whether to switch to custom formatting,
+ // otherwise use the template that comes with the model (if any)
+ // if a chat template is specified, warn that the prefix and suffix will not be used
if (params.chat_template.empty()) {
- if (!ctx_server.validate_model_chat_template()) {
+ if (!params.input_prefix.empty() || !params.input_suffix.empty()) {
+ LOG_WRN("%s: Prefix and suffix will be used for a custom chat template. This may cause the model to output suboptimal responses\n", __func__);
+ } else if (!ctx_server.validate_model_chat_template()) {
LOG_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__);
- params.chat_template = "chatml";
}
+ } else if (!params.input_prefix.empty() || !params.input_suffix.empty()) {
+ LOG_WRN("%s: Prefix and suffix are defined, but will not be used because a chat template '%s' is chosen.\n", __func__, params.chat_template.c_str());
}
// print sample chat example to make it clear which template is used
- LOG_INF("%s: chat template, built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.empty(), common_chat_format_example(ctx_server.model, params.chat_template).c_str());
+ LOG_INF("%s: chat template: '%s', built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.c_str(), params.chat_template.empty(), format_chat_example(ctx_server.model, params.chat_template, params.input_prefix, params.input_suffix).c_str());
ctx_server.queue_tasks.on_new_task(std::bind(
&server_context::process_single_task, &ctx_server, std::placeholders::_1));
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index c47ed3e47a76d..affb1d6697905 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -299,9 +299,12 @@ static llama_tokens format_infill(
return embd_inp;
}
-// Format given chat. If tmpl is empty, we take the template from model metadata
-inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
+// Format given chat. If tmpl is empty, we either use prefix and suffix (if defined), or take the template from model metadata
+inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::string & prefix, const std::string & suffix, const std::vector<json> & messages) {
std::vector<common_chat_msg> chat;
+ std::string formatted_chat;
+
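+ // use the simple prefix/suffix formatting only when no chat template is specified and a prefix and/or suffix is set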
+ bool is_custom = tmpl.empty() && (!prefix.empty() || !suffix.empty());
for (size_t i = 0; i < messages.size(); ++i) {
const auto & curr_msg = messages[i];
@@ -325,15 +328,49 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
throw std::runtime_error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
}
- chat.push_back({role, content});
+ if (is_custom) {
+ // simple format using prefix and suffix
+ if (role == "user") {
+     formatted_chat += prefix + content + suffix;
+ } else {
+     formatted_chat += content;
+ }
+ } else {
+ chat.push_back({role, content});
+ }
+ }
+
+ if (!is_custom) {
+ LOG_WRN("Using '%s' template, prefix and suffix are ignored.\n", tmpl.c_str());
+ formatted_chat = common_chat_apply_template(model, tmpl, chat, true);
+ } else {
+ LOG_WRN("Used prefix '%s' and suffix '%s'.\n", prefix.c_str(), suffix.c_str());
}
- const auto formatted_chat = common_chat_apply_template(model, tmpl, chat, true);
- LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());
+ LOG_DBG("formatted_chat using '%s': '%s'\n", tmpl.c_str(), formatted_chat.c_str());
return formatted_chat;
}
+inline std::string format_chat_example(const struct llama_model * model, const std::string & tmpl, const std::string & prefix, const std::string & suffix) {
+ std::vector<common_chat_msg> msgs = {
+ {"system", "You are a helpful assistant"},
+ {"user", "Hello"},
+ {"assistant", "Hi there"},
+ {"user", "How are you?"},
+ };
+
+ std::string formatted_example;
+
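+ // mirror the logic of format_chat(): prefix/suffix formatting applies only when no chat template is specified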
+ if (tmpl.empty() && (!prefix.empty() || !suffix.empty())) {
+ for (const auto & message : msgs) {
+     if (message.role == "user") {
+         formatted_example += prefix + message.content + suffix;
+     } else {
+         formatted_example += message.content;
+     }
+ }
+ } else {
+ formatted_example = common_chat_apply_template(model, tmpl, msgs, true);
+ }
+
+ return formatted_example;
+}
+
static std::string llama_get_chat_template(const struct llama_model * model) {
std::string template_key = "tokenizer.chat_template";
// call with NULL buffer to get the total size of the string
@@ -597,13 +634,27 @@ static bool server_sent_event(httplib::DataSink & sink, const char * event, cons
static json oaicompat_completion_params_parse(
const struct llama_model * model,
const json & body, /* openai api json semantics */
- const std::string & chat_template) {
+ const std::string & chat_template,
+ const std::string & input_prefix,
+ const std::string & input_suffix) {
json llama_params;
llama_params["__oaicompat"] = true;
// Apply chat template to the list of messages
- llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));
+ std::string chat_tmpl = (body.contains("chat_template") ? body.at("chat_template").get<std::string>() : chat_template);
+ std::string prefix = (body.contains("input_prefix") ? body.at("input_prefix").get<std::string>() : "");
+ std::string suffix = (body.contains("input_suffix") ? body.at("input_suffix").get<std::string>() : "");
+
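+ // fall back to the server-wide --in-prefix / --in-suffix values when the request does not override them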
+ if (prefix.empty()) {
+ prefix = input_prefix;
+ }
+
+ if (suffix.empty()) {
+ suffix = input_suffix;
+ }
+
+ llama_params["prompt"] = format_chat(model, chat_tmpl, prefix, suffix, body.at("messages"));
// Handle "stop" field
if (body.contains("stop") && body.at("stop").is_string()) {
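
For reference, a minimal client sketch (not part of the patch) of how the new per-request input_prefix / input_suffix fields could be exercised against a server started without --chat-template. The port, header paths and the prefix/suffix strings are illustrative assumptions only.

```cpp
// Sketch only: endpoint, port and prefix/suffix strings are assumptions, not part of this patch.
#include "httplib.h"   // cpp-httplib, vendored by llama.cpp (include path may differ)
#include "json.hpp"    // nlohmann::json, vendored by llama.cpp (include path may differ)
#include <cstdio>

using json = nlohmann::json;

int main() {
    httplib::Client cli("http://localhost:8080"); // assumes llama-server on the default port

    json body = {
        {"messages", json::array({
            {{"role", "system"}, {"content", "You are a helpful assistant."}},
            {{"role", "user"},   {"content", "Hello"}},
        })},
        // with no chat template set (on the server or in the request),
        // these switch format_chat() to the prefix/suffix path
        {"input_prefix", "### Instruction:\n"},
        {"input_suffix", "\n### Response:\n"},
    };

    auto res = cli.Post("/v1/chat/completions", body.dump(), "application/json");
    if (res) {
        printf("%s\n", res->body.c_str());
    } else {
        fprintf(stderr, "request failed\n");
    }
    return 0;
}
```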