diff --git a/tools/server/public_simplechat/index.html b/tools/server/public_simplechat/index.html
index f6413016fcc53..3cd840569c3a7 100644
--- a/tools/server/public_simplechat/index.html
+++ b/tools/server/public_simplechat/index.html
@@ -40,6 +40,19 @@
You need to have javascript enabled.
+
diff --git a/tools/server/public_simplechat/readme.md b/tools/server/public_simplechat/readme.md
index 24e026d455b03..d50588cce5e78 100644
--- a/tools/server/public_simplechat/readme.md
+++ b/tools/server/public_simplechat/readme.md
@@ -7,7 +7,7 @@ by Humans for All.
To run from the build dir
-bin/llama-server -m path/model.gguf --path ../tools/server/public_simplechat
+bin/llama-server -m path/model.gguf --path ../tools/server/public_simplechat --jinja
Continue reading for the details.
@@ -33,6 +33,10 @@ Allows developer/end-user to control some of the behaviour by updating gMe membe
console. Parallely some of the directly useful to end-user settings can also be changed using the provided
settings ui.
+For GenAi/LLM models supporting tool / function calling, allows one to interact with them and explore use of
+ai driven augmenting of the knowledge used for generating answers by using the predefined tools/functions.
+The end user is provided control over tool calling and response submitting.
+
NOTE: Current web service api doesnt expose the model context length directly, so client logic doesnt provide
any adaptive culling of old messages nor of replacing them with summary of their content etal. However there
is a optional sliding window based chat logic, which provides a simple minded culling of old messages from
@@ -64,6 +68,16 @@ next run this web front end in tools/server/public_simplechat
* cd ../tools/server/public_simplechat
* python3 -m http.server PORT
+### for tool calling
+
+remember to
+
+* pass --jinja to llama-server to enable tool calling support from the server ai engine end.
+
+* enable bTools in the settings page of the client side gui.
+
+* use a GenAi/LLM model which supports tool calling.
+
### using the front end
Open this simple web front end from your local browser
@@ -78,6 +92,7 @@ Once inside
* try trim garbage in response or not
* amount of chat history in the context sent to server/ai-model
* oneshot or streamed mode.
+ * use built in tool calling or not
* In completion mode
* one normally doesnt use a system prompt in completion mode.
@@ -116,6 +131,17 @@ Once inside
* the user input box will be disabled and a working message will be shown in it.
* if trim garbage is enabled, the logic will try to trim repeating text kind of garbage to some extent.
+* tool calling flow when working with ai models which support tool / function calling
+ * if tool calling is enabled and the user query results in need for one of the builtin tools to be
+ called, then the ai response might include request for tool call.
+ * the SimpleChat client will show details of the tool call (ie tool name and args passed) requested
+ and allow the user to trigger it as is or after modifying things as needed.
+ NOTE: Tool sees the original tool call only, for now
+ * inturn returned / generated result is placed into user query entry text area with appropriate tags
+ ie
generated result with meta data
+ * if user is ok with the tool response, they can click submit to send the same to the GenAi/LLM.
+ User can even modify the response generated by the tool, if required, before submitting.
+
* just refresh the page, to reset wrt the chat history and or system prompt and start afresh.
* Using NewChat one can start independent chat sessions.
@@ -158,6 +184,19 @@ It is attached to the document object. Some of these can also be updated using t
inturn the machine goes into power saving mode or so, the platform may stop network connection,
leading to exception.
+ bTools - control whether tool calling is enabled or not
+
+ remember to enable this only for GenAi/LLM models which support tool/function calling.
+
+ the builtin tools' meta data is sent to the ai model in the requests sent to it.
+
+ inturn if the ai model requests a tool call to be made, the same will be done and the response
+ sent back to the ai model, under user control.
+
+ as tool calling will involve a bit of back and forth between ai assistant and end user, it is
+ recommended to set iRecentUserMsgCnt to 10 or more, so that enough context is retained during
+ chatting with ai models with tool support.
+
apiEP - select between /completions and /chat/completions endpoint provided by the server/ai-model.
bCompletionFreshChatAlways - whether Completion mode collates complete/sliding-window history when
@@ -201,10 +240,10 @@ It is attached to the document object. Some of these can also be updated using t
be set if needed using the settings ui.
iRecentUserMsgCnt - a simple minded SlidingWindow to limit context window load at Ai Model end.
- This is disabled by default. However if enabled, then in addition to latest system message, only
- the last/latest iRecentUserMsgCnt user messages after the latest system prompt and its responses
- from the ai model will be sent to the ai-model, when querying for a new response. IE if enabled,
- only user messages after the latest system message/prompt will be considered.
+ This is set to 10 by default. So in addition to latest system message, last/latest iRecentUserMsgCnt
+ user messages after the latest system prompt and its responses from the ai model will be sent
+ to the ai-model, when querying for a new response. Note that if enabled, only user messages after
+ the latest system message/prompt will be considered.
This specified sliding window user message count also includes the latest user query.
<0 : Send entire chat history to server
@@ -244,9 +283,11 @@ full chat history. This way if there is any response with garbage/repeatation, i
mess with things beyond the next question/request/query, in some ways. The trim garbage
option also tries to help avoid issues with garbage in the context to an extent.
-Set max_tokens to 1024, so that a relatively large previous reponse doesnt eat up the space
-available wrt next query-response. However dont forget that the server when started should
-also be started with a model context size of 1k or more, to be on safe side.
+Set max_tokens to 2048, so that a relatively large previous response doesnt eat up the space
+available wrt next query-response. While parallely allowing a good enough context size for
+some amount of the chat history in the current session to influence future answers. However
+dont forget that the server when started should also be started with a model context size of
+2k or more, to be on safe side.
The /completions endpoint of tools/server doesnt take max_tokens, instead it takes the
internal n_predict, for now add the same here on the client side, maybe later add max_tokens
@@ -281,6 +322,86 @@ NOTE: Not tested, as there is no free tier api testing available. However logica
work.
+### Tool Calling
+
+ALERT: The simple minded way in which this is implemented, it can be dangerous in the worst case.
+Always remember to verify all the tool calls requested and the responses generated manually to
+ensure everything is fine, during interaction with ai models with tools support.
+
+#### Builtin Tools
+
+The following tools/functions are currently provided by default
+* simple_calculator - which can solve simple arithmetic expressions
+* run_javascript_function_code - which can be used to run some javascript code in the browser
+ context.
+
+Currently the generated code / expression is run through a simple minded eval inside a web worker
+mechanism. Use of WebWorker helps avoid exposing browser global scope to the generated code directly.
+However any shared web worker scope isnt isolated. Either way always remember to cross check the tool
+requests and generated responses when using tool calling.
+
+May add
+* web_fetch along with a corresponding simple local web proxy/caching server logic that can bypass
+ the CORS restrictions applied if trying to directly fetch from the browser js runtime environment.
+ Inturn maybe with a white list of allowed sites to access or so.
+
+
+#### Extending with new tools
+
+Provide a descriptive meta data explaining the tool / function being provided for tool calling,
+as well as its arguments.
+
+Provide a handler which should implement the specified tool / function call or rather constructs
+the code to be run to get the tool / function call job done, and inturn pass the same to the
+provided web worker to get it executed. Remember to use console.log while generating any response
+that should be sent back to the ai model, in your constructed code.
+
+Update the tc_switch to include a object entry for the tool, which inturn includes
+* the meta data as well as
+* a reference to the handler and also
+ the handler should take toolCallId, toolName and toolArgs and pass these along to
+ web worker as needed.
+* the result key (was used previously, may use in future, but for now left as is)
+
+#### OLD: Mapping tool calls and responses to normal assistant - user chat flow
+
+Instead of maintaining tool_call request and resultant response in logically separate parallel
+channel used for requesting tool_calls by the assistant and the resultant tool role response,
+the SimpleChatTC pushes it into the normal assistant - user chat flow itself, by including the
+tool call and response as a pair of tagged request with details in the assistant block and inturn
+tagged response in the subsequent user block.
+
+This allows the GenAi/LLM to be aware of the tool calls it made as well as the responses it got,
+so that it can incorporate the results of the same in the subsequent chat / interactions.
+
+NOTE: This flow tested to be ok enough with Gemma-3N-E4B-it-Q8_0 LLM ai model for now. Logically
+given the way current ai models work, most of them should understand things as needed, but need
+to test this with other ai models later.
+
+TODO:OLD: Need to think later, whether to continue this simple flow, or atleast use tool role wrt
+the tool call responses or even go further and have the logically separate tool_calls request
+structures also.
+
+DONE: rather both tool_calls structure wrt assistant messages and tool role based tool call
+result messages are generated as needed.
+
+
+#### ToDo
+
+WebFetch and Local web proxy/caching server
+
+Try and trap promises based flows to ensure all generated results or errors if any are caught
+before responding back to the ai model.
+
+Trap error responses.
+
+### Debugging the handshake
+
+When working with llama.cpp server based GenAi/LLM running locally
+
+sudo tcpdump -i lo -s 0 -vvv -A host 127.0.0.1 and port 8080 | tee /tmp/td.log
+
+
## At the end
Also a thank you to all open source and open model developers, who strive for the common good.
diff --git a/tools/server/public_simplechat/simplechat.css b/tools/server/public_simplechat/simplechat.css
index 13bfb80b48be8..d4755074b77c5 100644
--- a/tools/server/public_simplechat/simplechat.css
+++ b/tools/server/public_simplechat/simplechat.css
@@ -21,6 +21,9 @@
.role-user {
background-color: lightgray;
}
+.role-tool {
+ background-color: lightyellow;
+}
.role-trim {
background-color: lightpink;
}
diff --git a/tools/server/public_simplechat/simplechat.js b/tools/server/public_simplechat/simplechat.js
index 2fcd24a860bd4..9363063123930 100644
--- a/tools/server/public_simplechat/simplechat.js
+++ b/tools/server/public_simplechat/simplechat.js
@@ -4,11 +4,14 @@
import * as du from "./datautils.mjs";
import * as ui from "./ui.mjs"
+import * as tools from "./tools.mjs"
+
class Roles {
static System = "system";
static User = "user";
static Assistant = "assistant";
+ static Tool = "tool";
}
class ApiEP {
@@ -16,6 +19,7 @@ class ApiEP {
Chat: "chat",
Completion: "completion",
}
+ /** @type {Object
} */
static UrlSuffix = {
'chat': `/chat/completions`,
'completion': `/completions`,
@@ -35,6 +39,172 @@ class ApiEP {
}
+/**
+ * @typedef {{id: string, type: string, function: {name: string, arguments: string}}} NSToolCalls
+ */
+
+/**
+ * @typedef {{role: string, content: string, tool_calls: Array}} NSChatMessage
+ */
+
+class ChatMessageEx {
+
+ /**
+ * Represent a Message in the Chat
+ * @param {string} role
+ * @param {string} content
+ * @param {Array} tool_calls
+ * @param {string} trimmedContent
+ */
+ constructor(role = "", content="", tool_calls=[], trimmedContent="") {
+ /** @type {NSChatMessage} */
+ this.ns = { role: role, content: content, tool_calls: tool_calls }
+ this.trimmedContent = trimmedContent;
+ }
+
+ /**
+ * Create a new instance from an existing instance
+ * @param {ChatMessageEx} old
+ */
+ static newFrom(old) {
+ return new ChatMessageEx(old.ns.role, old.ns.content, old.ns.tool_calls, old.trimmedContent)
+ }
+
+ clear() {
+ this.ns.role = "";
+ this.ns.content = "";
+ this.ns.tool_calls = [];
+ this.trimmedContent = "";
+ }
+
+ /**
+ * Create a all in one tool call result string
+ * @param {string} toolCallId
+ * @param {string} toolName
+ * @param {string} toolResult
+ */
+ static createToolCallResultAllInOne(toolCallId, toolName, toolResult) {
+ return ` ${toolCallId} ${toolName} ${toolResult} `;
+ }
+
+ /**
+ * Extract the elements of the all in one tool call result string
+ * @param {string} allInOne
+ */
+ static extractToolCallResultAllInOne(allInOne) {
+ const regex = /\s*(.*?)<\/id>\s*(.*?)<\/name>\s*([\s\S]*?)<\/content>\s*<\/tool_response>/si;
+ const caught = allInOne.match(regex)
+ let data = { tool_call_id: "Error", name: "Error", content: "Error" }
+ if (caught) {
+ data = {
+ tool_call_id: caught[1].trim(),
+ name: caught[2].trim(),
+ content: caught[3].trim()
+ }
+ }
+ return data
+ }
+
+ /**
+ * Set extra members into the ns object
+ * @param {string | number} key
+ * @param {any} value
+ */
+ ns_set_extra(key, value) {
+ // @ts-ignore
+ this.ns[key] = value
+ }
+
+ /**
+ * Remove specified key and its value from ns object
+ * @param {string | number} key
+ */
+ ns_delete(key) {
+ // @ts-ignore
+ delete(this.ns[key])
+ }
+
+ /**
+ * Update based on the drip by drip data got from network in streaming mode.
+ * Tries to support both Chat and Completion endpoints
+ * @param {any} nwo
+ * @param {string} apiEP
+ */
+ update_stream(nwo, apiEP) {
+ console.debug(nwo, apiEP)
+ if (apiEP == ApiEP.Type.Chat) {
+ if (nwo["choices"][0]["finish_reason"] === null) {
+ let content = nwo["choices"][0]["delta"]["content"];
+ if (content !== undefined) {
+ if (content !== null) {
+ this.ns.content += content;
+ } else {
+ this.ns.role = nwo["choices"][0]["delta"]["role"];
+ }
+ } else {
+ let toolCalls = nwo["choices"][0]["delta"]["tool_calls"];
+ if (toolCalls !== undefined) {
+ if (toolCalls[0]["function"]["name"] !== undefined) {
+ this.ns.tool_calls.push(toolCalls[0]);
+ /*
+ this.ns.tool_calls[0].function.name = toolCalls[0]["function"]["name"];
+ this.ns.tool_calls[0].id = toolCalls[0]["id"];
+ this.ns.tool_calls[0].type = toolCalls[0]["type"];
+ this.ns.tool_calls[0].function.arguments = toolCalls[0]["function"]["arguments"]
+ */
+ } else {
+ if (toolCalls[0]["function"]["arguments"] !== undefined) {
+ this.ns.tool_calls[0].function.arguments += toolCalls[0]["function"]["arguments"];
+ }
+ }
+ }
+ }
+ }
+ } else {
+ try {
+ this.ns.content += nwo["choices"][0]["text"];
+ } catch {
+ this.ns.content += nwo["content"];
+ }
+ }
+ }
+
+ /**
+ * Update based on the data got from network in oneshot mode
+ * @param {any} nwo
+ * @param {string} apiEP
+ */
+ update_oneshot(nwo, apiEP) {
+ if (apiEP == ApiEP.Type.Chat) {
+ this.ns.content = nwo["choices"][0]["message"]["content"];
+ } else {
+ try {
+ this.ns.content = nwo["choices"][0]["text"];
+ } catch {
+ this.ns.content = nwo["content"];
+ }
+ }
+ }
+
+ has_toolcall() {
+ if (this.ns.tool_calls.length == 0) {
+ return false
+ }
+ return true
+ }
+
+ content_equiv() {
+ if (this.ns.content !== "") {
+ return this.ns.content;
+ } else if (this.has_toolcall()) {
+ return `\n${this.ns.tool_calls[0].function.name}\n${this.ns.tool_calls[0].function.arguments}\n`;
+ } else {
+ return ""
+ }
+ }
+
+}
+
let gUsageMsg = `
Usage
@@ -44,8 +214,12 @@ let gUsageMsg = `
Completion mode - no system prompt normally.
Use shift+enter for inserting enter/newline.
- Enter your query to ai assistant below.
- Default ContextWindow = [System, Last Query+Resp, Cur Query].
+ Enter your query to ai assistant in textarea provided below.
+ If ai assistant requests a tool call, varify same before triggering it.
+
+ - submit tool response placed into user query textarea
+
+ Default ContextWindow = [System, Last9 Query+Resp, Cur Query].
- ChatHistInCtxt, MaxTokens, ModelCtxt window to expand
@@ -53,7 +227,7 @@ let gUsageMsg = `
`;
-/** @typedef {{role: string, content: string}[]} ChatMessages */
+/** @typedef {ChatMessageEx[]} ChatMessages */
/** @typedef {{iLastSys: number, xchat: ChatMessages}} SimpleChatODS */
@@ -70,7 +244,7 @@ class SimpleChat {
*/
this.xchat = [];
this.iLastSys = -1;
- this.latestResponse = "";
+ this.latestResponse = new ChatMessageEx();
}
clear() {
@@ -96,7 +270,16 @@ class SimpleChat {
/** @type {SimpleChatODS} */
let ods = JSON.parse(sods);
this.iLastSys = ods.iLastSys;
- this.xchat = ods.xchat;
+ this.xchat = [];
+ for (const cur of ods.xchat) {
+ if (cur.ns == undefined) {
+ /** @typedef {{role: string, content: string}} OldChatMessage */
+ let tcur = /** @type {OldChatMessage} */(/** @type {unknown} */(cur));
+ this.xchat.push(new ChatMessageEx(tcur.role, tcur.content))
+ } else {
+ this.xchat.push(new ChatMessageEx(cur.ns.role, cur.ns.content, cur.ns.tool_calls, cur.trimmedContent))
+ }
+ }
}
/**
@@ -118,8 +301,8 @@ class SimpleChat {
/** @type{ChatMessages} */
let rchat = [];
let sysMsg = this.get_system_latest();
- if (sysMsg.length != 0) {
- rchat.push({role: Roles.System, content: sysMsg});
+ if (sysMsg.ns.content.length != 0) {
+ rchat.push(sysMsg)
}
let iUserCnt = 0;
let iStart = this.xchat.length;
@@ -128,41 +311,55 @@ class SimpleChat {
break;
}
let msg = this.xchat[i];
- if (msg.role == Roles.User) {
+ if (msg.ns.role == Roles.User) {
iStart = i;
iUserCnt += 1;
}
}
for(let i = iStart; i < this.xchat.length; i++) {
let msg = this.xchat[i];
- if (msg.role == Roles.System) {
+ if (msg.ns.role == Roles.System) {
continue;
}
- rchat.push({role: msg.role, content: msg.content});
+ rchat.push(msg)
}
return rchat;
}
+
/**
- * Collate the latest response from the server/ai-model, as it is becoming available.
- * This is mainly useful for the stream mode.
- * @param {string} content
+ * Return recent chat messages in the format,
+ * which can be directly sent to the ai server.
+ * @param {number} iRecentUserMsgCnt - look at recent_chat for semantic
*/
- append_response(content) {
- this.latestResponse += content;
+ recent_chat_ns(iRecentUserMsgCnt) {
+ let xchat = this.recent_chat(iRecentUserMsgCnt);
+ let chat = [];
+ for (const msg of xchat) {
+ let tmsg = ChatMessageEx.newFrom(msg);
+ if (!tmsg.has_toolcall()) {
+ tmsg.ns_delete("tool_calls")
+ }
+ if (tmsg.ns.role == Roles.Tool) {
+ let res = ChatMessageEx.extractToolCallResultAllInOne(tmsg.ns.content)
+ tmsg.ns.content = res.content
+ tmsg.ns_set_extra("tool_call_id", res.tool_call_id)
+ tmsg.ns_set_extra("name", res.name)
+ }
+ chat.push(tmsg.ns);
+ }
+ return chat
}
/**
- * Add an entry into xchat
- * @param {string} role
- * @param {string|undefined|null} content
+ * Add an entry into xchat.
+ * NOTE: A new copy is created and added into xchat.
+ * Also update iLastSys system prompt index tracker
+ * @param {ChatMessageEx} chatMsg
*/
- add(role, content) {
- if ((content == undefined) || (content == null) || (content == "")) {
- return false;
- }
- this.xchat.push( {role: role, content: content} );
- if (role == Roles.System) {
+ add(chatMsg) {
+ this.xchat.push(ChatMessageEx.newFrom(chatMsg));
+ if (chatMsg.ns.role == Roles.System) {
this.iLastSys = this.xchat.length - 1;
}
this.save();
@@ -180,8 +377,8 @@ class SimpleChat {
}
let last = undefined;
for(const x of this.recent_chat(gMe.iRecentUserMsgCnt)) {
- let entry = ui.el_create_append_p(`${x.role}: ${x.content}`, div);
- entry.className = `role-${x.role}`;
+ let entry = ui.el_create_append_p(`${x.ns.role}: ${x.content_equiv()}`, div);
+ entry.className = `role-${x.ns.role}`;
last = entry;
}
if (last !== undefined) {
@@ -219,7 +416,7 @@ class SimpleChat {
* The needed fields/options are picked from a global object.
* Add optional stream flag, if required.
* Convert the json into string.
- * @param {Object} obj
+ * @param {Object} obj
*/
request_jsonstr_extend(obj) {
for(let k in gMe.apiRequestOptions) {
@@ -228,6 +425,9 @@ class SimpleChat {
if (gMe.bStream) {
obj["stream"] = true;
}
+ if (gMe.bTools) {
+ obj["tools"] = tools.meta();
+ }
return JSON.stringify(obj);
}
@@ -236,7 +436,7 @@ class SimpleChat {
*/
request_messages_jsonstr() {
let req = {
- messages: this.recent_chat(gMe.iRecentUserMsgCnt),
+ messages: this.recent_chat_ns(gMe.iRecentUserMsgCnt),
}
return this.request_jsonstr_extend(req);
}
@@ -248,15 +448,15 @@ class SimpleChat {
request_prompt_jsonstr(bInsertStandardRolePrefix) {
let prompt = "";
let iCnt = 0;
- for(const chat of this.recent_chat(gMe.iRecentUserMsgCnt)) {
+ for(const msg of this.recent_chat(gMe.iRecentUserMsgCnt)) {
iCnt += 1;
if (iCnt > 1) {
prompt += "\n";
}
if (bInsertStandardRolePrefix) {
- prompt += `${chat.role}: `;
+ prompt += `${msg.ns.role}: `;
}
- prompt += `${chat.content}`;
+ prompt += `${msg.ns.content}`;
}
let req = {
prompt: prompt,
@@ -276,73 +476,10 @@ class SimpleChat {
}
}
- /**
- * Extract the ai-model/assistant's response from the http response got.
- * Optionally trim the message wrt any garbage at the end.
- * @param {any} respBody
- * @param {string} apiEP
- */
- response_extract(respBody, apiEP) {
- let assistant = "";
- if (apiEP == ApiEP.Type.Chat) {
- assistant = respBody["choices"][0]["message"]["content"];
- } else {
- try {
- assistant = respBody["choices"][0]["text"];
- } catch {
- assistant = respBody["content"];
- }
- }
- return assistant;
- }
-
- /**
- * Extract the ai-model/assistant's response from the http response got in streaming mode.
- * @param {any} respBody
- * @param {string} apiEP
- */
- response_extract_stream(respBody, apiEP) {
- let assistant = "";
- if (apiEP == ApiEP.Type.Chat) {
- if (respBody["choices"][0]["finish_reason"] !== "stop") {
- assistant = respBody["choices"][0]["delta"]["content"];
- }
- } else {
- try {
- assistant = respBody["choices"][0]["text"];
- } catch {
- assistant = respBody["content"];
- }
- }
- return assistant;
- }
-
- /**
- * Allow setting of system prompt, but only at begining.
- * @param {string} sysPrompt
- * @param {string} msgTag
- */
- add_system_begin(sysPrompt, msgTag) {
- if (this.xchat.length == 0) {
- if (sysPrompt.length > 0) {
- return this.add(Roles.System, sysPrompt);
- }
- } else {
- if (sysPrompt.length > 0) {
- if (this.xchat[0].role !== Roles.System) {
- console.error(`ERRR:SimpleChat:SC:${msgTag}:You need to specify system prompt before any user query, ignoring...`);
- } else {
- if (this.xchat[0].content !== sysPrompt) {
- console.error(`ERRR:SimpleChat:SC:${msgTag}:You cant change system prompt, mid way through, ignoring...`);
- }
- }
- }
- }
- return false;
- }
/**
* Allow setting of system prompt, at any time.
+ * Updates the system prompt, if one was never set or if the newly passed is different from the last set system prompt.
* @param {string} sysPrompt
* @param {string} msgTag
*/
@@ -352,25 +489,24 @@ class SimpleChat {
}
if (this.iLastSys < 0) {
- return this.add(Roles.System, sysPrompt);
+ return this.add(new ChatMessageEx(Roles.System, sysPrompt));
}
- let lastSys = this.xchat[this.iLastSys].content;
+ let lastSys = this.xchat[this.iLastSys].ns.content;
if (lastSys !== sysPrompt) {
- return this.add(Roles.System, sysPrompt);
+ return this.add(new ChatMessageEx(Roles.System, sysPrompt));
}
return false;
}
/**
- * Retrieve the latest system prompt.
+ * Retrieve the latest system prompt related chat message entry.
*/
get_system_latest() {
if (this.iLastSys == -1) {
- return "";
+ return new ChatMessageEx(Roles.System);
}
- let sysPrompt = this.xchat[this.iLastSys].content;
- return sysPrompt;
+ return this.xchat[this.iLastSys];
}
@@ -387,7 +523,8 @@ class SimpleChat {
}
let tdUtf8 = new TextDecoder("utf-8");
let rr = resp.body.getReader();
- this.latestResponse = "";
+ this.latestResponse.clear()
+ this.latestResponse.ns.role = Roles.Assistant
let xLines = new du.NewLines();
while(true) {
let { value: cur, done: done } = await rr.read();
@@ -412,16 +549,16 @@ class SimpleChat {
}
let curJson = JSON.parse(curLine);
console.debug("DBUG:SC:PART:Json:", curJson);
- this.append_response(this.response_extract_stream(curJson, apiEP));
+ this.latestResponse.update_stream(curJson, apiEP);
}
- elP.innerText = this.latestResponse;
+ elP.innerText = this.latestResponse.content_equiv()
elP.scrollIntoView(false);
if (done) {
break;
}
}
- console.debug("DBUG:SC:PART:Full:", this.latestResponse);
- return this.latestResponse;
+ console.debug("DBUG:SC:PART:Full:", this.latestResponse.content_equiv());
+ return ChatMessageEx.newFrom(this.latestResponse);
}
/**
@@ -432,43 +569,65 @@ class SimpleChat {
async handle_response_oneshot(resp, apiEP) {
let respBody = await resp.json();
console.debug(`DBUG:SimpleChat:SC:${this.chatId}:HandleUserSubmit:RespBody:${JSON.stringify(respBody)}`);
- return this.response_extract(respBody, apiEP);
+ let cm = new ChatMessageEx(Roles.Assistant)
+ cm.update_oneshot(respBody, apiEP)
+ return cm
}
/**
* Handle the response from the server be it in oneshot or multipart/stream mode.
* Also take care of the optional garbage trimming.
+ * TODO: Need to handle tool calling and related flow, including how to show
+ * the assistant's request for tool calling and the response from tool.
* @param {Response} resp
* @param {string} apiEP
* @param {HTMLDivElement} elDiv
*/
async handle_response(resp, apiEP, elDiv) {
- let theResp = {
- assistant: "",
- trimmed: "",
- }
+ let theResp = null;
if (gMe.bStream) {
try {
- theResp.assistant = await this.handle_response_multipart(resp, apiEP, elDiv);
- this.latestResponse = "";
+ theResp = await this.handle_response_multipart(resp, apiEP, elDiv);
+ this.latestResponse.clear();
} catch (error) {
- theResp.assistant = this.latestResponse;
- this.add(Roles.Assistant, theResp.assistant);
- this.latestResponse = "";
+ theResp = this.latestResponse;
+ theResp.ns.role = Roles.Assistant;
+ this.add(theResp);
+ this.latestResponse.clear();
throw error;
}
} else {
- theResp.assistant = await this.handle_response_oneshot(resp, apiEP);
+ theResp = await this.handle_response_oneshot(resp, apiEP);
}
if (gMe.bTrimGarbage) {
- let origMsg = theResp.assistant;
- theResp.assistant = du.trim_garbage_at_end(origMsg);
- theResp.trimmed = origMsg.substring(theResp.assistant.length);
+ let origMsg = theResp.ns.content;
+ theResp.ns.content = du.trim_garbage_at_end(origMsg);
+ theResp.trimmedContent = origMsg.substring(theResp.ns.content.length);
}
- this.add(Roles.Assistant, theResp.assistant);
+ theResp.ns.role = Roles.Assistant;
+ this.add(theResp);
return theResp;
}
+ /**
+ * Call the requested tool/function.
+ * Returns undefined, if the call was placed successfully
+ * Else some appropriate error message will be returned.
+ * @param {string} toolcallid
+ * @param {string} toolname
+ * @param {string} toolargs
+ */
+ async handle_toolcall(toolcallid, toolname, toolargs) {
+ if (toolname === "") {
+ return "Tool/Function call name not specified"
+ }
+ try {
+ return await tools.tool_call(toolcallid, toolname, toolargs)
+ } catch (/** @type {any} */error) {
+ return `Tool/Function call raised an exception:${error.name}:${error.message}`
+ }
+ }
+
}
@@ -488,6 +647,10 @@ class MultiChatUI {
this.elDivHeading = /** @type{HTMLSelectElement} */(document.getElementById("heading"));
this.elDivSessions = /** @type{HTMLDivElement} */(document.getElementById("sessions-div"));
this.elBtnSettings = /** @type{HTMLButtonElement} */(document.getElementById("settings"));
+ this.elDivTool = /** @type{HTMLDivElement} */(document.getElementById("tool-div"));
+ this.elBtnTool = /** @type{HTMLButtonElement} */(document.getElementById("tool-btn"));
+ this.elInToolName = /** @type{HTMLInputElement} */(document.getElementById("toolname-in"));
+ this.elInToolArgs = /** @type{HTMLInputElement} */(document.getElementById("toolargs-in"));
this.validate_element(this.elInSystem, "system-in");
this.validate_element(this.elDivChat, "chat-div");
@@ -495,6 +658,10 @@ class MultiChatUI {
this.validate_element(this.elDivHeading, "heading");
this.validate_element(this.elDivChat, "sessions-div");
this.validate_element(this.elBtnSettings, "settings");
+ this.validate_element(this.elDivTool, "tool-div");
+ this.validate_element(this.elInToolName, "toolname-in");
+ this.validate_element(this.elInToolArgs, "toolargs-in");
+ this.validate_element(this.elBtnTool, "tool-btn");
}
/**
@@ -506,18 +673,42 @@ class MultiChatUI {
if (el == null) {
throw Error(`ERRR:SimpleChat:MCUI:${msgTag} element missing in html...`);
} else {
+ // @ts-ignore
console.debug(`INFO:SimpleChat:MCUI:${msgTag} Id[${el.id}] Name[${el["name"]}]`);
}
}
+ /**
+ * Reset/Setup Tool Call UI parts as needed
+ * @param {ChatMessageEx} ar
+ */
+ ui_reset_toolcall_as_needed(ar) {
+ if (ar.has_toolcall()) {
+ this.elDivTool.hidden = false
+ this.elInToolName.value = ar.ns.tool_calls[0].function.name
+ this.elInToolName.dataset.tool_call_id = ar.ns.tool_calls[0].id
+ this.elInToolArgs.value = ar.ns.tool_calls[0].function.arguments
+ this.elBtnTool.disabled = false
+ } else {
+ this.elDivTool.hidden = true
+ this.elInToolName.value = ""
+ this.elInToolName.dataset.tool_call_id = ""
+ this.elInToolArgs.value = ""
+ this.elBtnTool.disabled = true
+ }
+ }
+
/**
* Reset user input ui.
- * * clear user input
+ * * clear user input (if requested, default true)
* * enable user input
* * set focus to user input
+ * @param {boolean} [bClearElInUser=true]
*/
- ui_reset_userinput() {
- this.elInUser.value = "";
+ ui_reset_userinput(bClearElInUser=true) {
+ if (bClearElInUser) {
+ this.elInUser.value = "";
+ }
this.elInUser.disabled = false;
this.elInUser.focus();
}
@@ -535,6 +726,8 @@ class MultiChatUI {
this.handle_session_switch(this.curChatId);
}
+ this.ui_reset_toolcall_as_needed(new ChatMessageEx());
+
this.elBtnSettings.addEventListener("click", (ev)=>{
this.elDivChat.replaceChildren();
gMe.show_settings(this.elDivChat);
@@ -552,6 +745,19 @@ class MultiChatUI {
});
});
+ this.elBtnTool.addEventListener("click", (ev)=>{
+ if (this.elDivTool.hidden) {
+ return;
+ }
+ this.handle_tool_run(this.curChatId);
+ })
+
+ tools.setup((id, name, data)=>{
+ clearTimeout(this.idTimeOut)
+ this.elInUser.value = ChatMessageEx.createToolCallResultAllInOne(id, name, data);
+ this.ui_reset_userinput(false)
+ })
+
this.elInUser.addEventListener("keyup", (ev)=> {
// allow user to insert enter into their message using shift+enter.
// while just pressing enter key will lead to submitting.
@@ -593,6 +799,14 @@ class MultiChatUI {
/**
* Handle user query submit request, wrt specified chat session.
+ * NOTE: Currently the user query entry area is used for
+ * * showing and allowing edits by user wrt tool call results
+ * in a predfined simple xml format,
+ * ie before they submit tool result to ai engine on server
+ * * as well as for user to enter their own queries.
+ * Based on presence of the predefined xml format data at beginning
+ * the logic will treat it has a tool result and if not then as a
+ * normal user query.
* @param {string} chatId
* @param {string} apiEP
*/
@@ -608,13 +822,20 @@ class MultiChatUI {
chat.clear();
}
+ this.ui_reset_toolcall_as_needed(new ChatMessageEx());
+
chat.add_system_anytime(this.elInSystem.value, chatId);
let content = this.elInUser.value;
- if (!chat.add(Roles.User, content)) {
+ if (content.trim() == "") {
console.debug(`WARN:SimpleChat:MCUI:${chatId}:HandleUserSubmit:Ignoring empty user input...`);
return;
}
+ if (content.startsWith("")) {
+ chat.add(new ChatMessageEx(Roles.Tool, content))
+ } else {
+ chat.add(new ChatMessageEx(Roles.User, content))
+ }
chat.show(this.elDivChat);
let theUrl = ApiEP.Url(gMe.baseURL, apiEP);
@@ -633,16 +854,43 @@ class MultiChatUI {
let theResp = await chat.handle_response(resp, apiEP, this.elDivChat);
if (chatId == this.curChatId) {
chat.show(this.elDivChat);
- if (theResp.trimmed.length > 0) {
- let p = ui.el_create_append_p(`TRIMMED:${theResp.trimmed}`, this.elDivChat);
+ if (theResp.trimmedContent.length > 0) {
+ let p = ui.el_create_append_p(`TRIMMED:${theResp.trimmedContent}`, this.elDivChat);
p.className="role-trim";
}
} else {
console.debug(`DBUG:SimpleChat:MCUI:HandleUserSubmit:ChatId has changed:[${chatId}] [${this.curChatId}]`);
}
+ this.ui_reset_toolcall_as_needed(theResp);
this.ui_reset_userinput();
}
    /**
     * Handle running of specified tool call if any, for the specified chat session.
     * Also sets up a timeout, so that user gets control back to interact with the ai model.
     * @param {string} chatId
     */
    async handle_tool_run(chatId) {
        let chat = this.simpleChats[chatId];
        // Park the user input area while the tool call runs; either the
        // tools worker callback (registered via tools.setup) or the timeout
        // below will re-enable it with a result.
        this.elInUser.value = "toolcall in progress...";
        this.elInUser.disabled = true;
        let toolname = this.elInToolName.value.trim()
        // tool_call_id is expected to have been stashed on the tool-name
        // element when the ai engine's tool call request was shown.
        let toolCallId = this.elInToolName.dataset.tool_call_id;
        if (toolCallId === undefined) {
            toolCallId = "??? ToolCallId Missing ???"
        }
        // NOTE(review): handle_toolcall appears to return an error string on
        // immediate failure and undefined once the call has been dispatched
        // to the tools web worker - confirm against SimpleChat.handle_toolcall.
        let toolResult = await chat.handle_toolcall(toolCallId, toolname, this.elInToolArgs.value)
        if (toolResult !== undefined) {
            // Immediate (error) result - surface it in the user input area
            // for review, without clearing it, so the user can edit/submit.
            this.elInUser.value = ChatMessageEx.createToolCallResultAllInOne(toolCallId, toolname, toolResult);
            this.ui_reset_userinput(false)
        } else {
            // Call dispatched to the worker - give control back to the user
            // after 10s if no response arrives; the tools.setup callback
            // clears this.idTimeOut when the worker does respond.
            this.idTimeOut = setTimeout(() => {
                this.elInUser.value = ChatMessageEx.createToolCallResultAllInOne(toolCallId, toolname, `Tool/Function call ${toolname} taking too much time, aborting...`);
                this.ui_reset_userinput(false)
            }, 10000)
        }
    }
+
/**
* Show buttons for NewChat and available chat sessions, in the passed elDiv.
* If elDiv is undefined/null, then use this.elDivSessions.
@@ -682,6 +930,11 @@ class MultiChatUI {
}
}
+ /**
+ * Create session button and append to specified Div element.
+ * @param {HTMLDivElement} elDiv
+ * @param {string} cid
+ */
create_session_btn(elDiv, cid) {
let btn = ui.el_create_button(cid, (ev)=>{
let target = /** @type{HTMLButtonElement} */(ev.target);
@@ -708,7 +961,7 @@ class MultiChatUI {
console.error(`ERRR:SimpleChat:MCUI:HandleSessionSwitch:${chatId} missing...`);
return;
}
- this.elInSystem.value = chat.get_system_latest();
+ this.elInSystem.value = chat.get_system_latest().ns.content;
this.elInUser.value = "";
chat.show(this.elDivChat);
this.elInUser.focus();
@@ -726,28 +979,35 @@ class Me {
this.defaultChatIds = [ "Default", "Other" ];
this.multiChat = new MultiChatUI();
this.bStream = true;
+ this.bTools = false;
this.bCompletionFreshChatAlways = true;
this.bCompletionInsertStandardRolePrefix = false;
this.bTrimGarbage = true;
- this.iRecentUserMsgCnt = 2;
+ this.iRecentUserMsgCnt = 10;
+ /** @type {Object} */
this.sRecentUserMsgCnt = {
"Full": -1,
"Last0": 1,
"Last1": 2,
"Last2": 3,
"Last4": 5,
+ "Last9": 10,
};
this.apiEP = ApiEP.Type.Chat;
+ /** @type {Object} */
this.headers = {
"Content-Type": "application/json",
"Authorization": "", // Authorization: Bearer OPENAI_API_KEY
}
- // Add needed fields wrt json object to be sent wrt LLM web services completions endpoint.
+ /**
+ * Add needed fields wrt json object to be sent wrt LLM web services completions endpoint.
+ * @type {Object}
+ */
this.apiRequestOptions = {
"model": "gpt-3.5-turbo",
"temperature": 0.7,
- "max_tokens": 1024,
- "n_predict": 1024,
+ "max_tokens": 2048,
+ "n_predict": 2048,
"cache_prompt": false,
//"frequency_penalty": 1.2,
//"presence_penalty": 1.2,
@@ -780,7 +1040,7 @@ class Me {
chat.load();
queueMicrotask(()=>{
chat.show(div);
- this.multiChat.elInSystem.value = chat.get_system_latest();
+ this.multiChat.elInSystem.value = chat.get_system_latest().ns.content;
});
});
div.appendChild(btn);
@@ -804,6 +1064,8 @@ class Me {
ui.el_create_append_p(`bStream:${this.bStream}`, elDiv);
+ ui.el_create_append_p(`bTools:${this.bTools}`, elDiv);
+
ui.el_create_append_p(`bTrimGarbage:${this.bTrimGarbage}`, elDiv);
ui.el_create_append_p(`ApiEndPoint:${this.apiEP}`, elDiv);
@@ -878,6 +1140,11 @@ class Me {
});
elDiv.appendChild(bb.div);
+ bb = ui.el_creatediv_boolbutton("SetTools", "Tools", {true: "[+] yes tools", false: "[-] no tools"}, this.bTools, (val)=>{
+ this.bTools = val;
+ });
+ elDiv.appendChild(bb.div);
+
bb = ui.el_creatediv_boolbutton("SetTrimGarbage", "TrimGarbage", {true: "[+] yes trim", false: "[-] dont trim"}, this.bTrimGarbage, (val)=>{
this.bTrimGarbage = val;
});
@@ -886,6 +1153,7 @@ class Me {
this.show_settings_apirequestoptions(elDiv);
let sel = ui.el_creatediv_select("SetApiEP", "ApiEndPoint", ApiEP.Type, this.apiEP, (val)=>{
+ // @ts-ignore
this.apiEP = ApiEP.Type[val];
});
elDiv.appendChild(sel.div);
@@ -917,8 +1185,13 @@ function startme() {
console.log("INFO:SimpleChat:StartMe:Starting...");
gMe = new Me();
gMe.debug_disable();
+ // @ts-ignore
document["gMe"] = gMe;
+ // @ts-ignore
document["du"] = du;
+ // @ts-ignore
+ document["tools"] = tools;
+ tools.init()
for (let cid of gMe.defaultChatIds) {
gMe.multiChat.new_chat_session(cid);
}
diff --git a/tools/server/public_simplechat/test-tools-cmdline.sh b/tools/server/public_simplechat/test-tools-cmdline.sh
new file mode 100644
index 0000000000000..8fc62d2af9a48
--- /dev/null
+++ b/tools/server/public_simplechat/test-tools-cmdline.sh
@@ -0,0 +1,92 @@
# Manual cmdline test for the tool/function calling flow of llama-server's
# OpenAI-compatible chat completions endpoint, independent of the web ui.
echo "DONT FORGET TO RUN llama-server"
echo "build/bin/llama-server -m ~/Downloads/GenAi.Text/gemma-3n-E4B-it-Q8_0.gguf --path tools/server/public_simplechat --jinja"
echo "Note: Remove stream: true line below, if you want one shot instead of streaming response from ai server"
echo "Note: Using different locations below, as the mechanism / url used to fetch will / may need to change"
echo "Note: sudo tcpdump -i lo -s 0 -vvv -A host 127.0.0.1 and port 8080 | tee /tmp/td.log can be used to capture the hs"
# Single chat completions request advertising three tools; the model's
# reply should reference / call them.
curl http://localhost:8080/v1/chat/completions -d '{
 "model": "gpt-3.5-turbo",
 "stream": true,
 "tools": [
 {
 "type":"function",
 "function":{
 "name":"javascript",
 "description":"Runs code in an javascript interpreter and returns the result of the execution after 60 seconds.",
 "parameters":{
 "type":"object",
 "properties":{
 "code":{
 "type":"string",
 "description":"The code to run in the javascript interpreter."
 }
 },
 "required":["code"]
 }
 }
 },
 {
 "type":"function",
 "function":{
 "name":"web_fetch",
 "description":"Connects to the internet and fetches the specified url, may take few seconds",
 "parameters":{
 "type":"object",
 "properties":{
 "url":{
 "type":"string",
 "description":"The url to fetch from internet."
 }
 },
 "required":["url"]
 }
 }
 },
 {
 "type":"function",
 "function":{
 "name":"simple_calc",
 "description":"Calculates the provided arithmatic expression using javascript interpreter and returns the result of the execution after few seconds.",
 "parameters":{
 "type":"object",
 "properties":{
 "arithexp":{
 "type":"string",
 "description":"The arithmatic expression that will be calculated using javascript interpreter."
 }
 },
 "required":["arithexp"]
 }
 }
 }
 ],
 "messages": [
 {
 "role": "user",
 "content": "What and all tools you have access to"
 }
 ]
}'


# Everything below the exit never executes - it is a stash of alternate
# sample prompts to paste into the "content" field above when testing.
exit


 "content": "what is your name."
 "content": "What and all tools you have access to"
 "content": "do you have access to any tools"
 "content": "Print a hello world message with python."
 "content": "Print a hello world message with javascript."
 "content": "Calculate the sum of 5 and 27."
 "content": "Can you get me todays date."
 "content": "Can you get me a summary of latest news from bbc world"
 "content": "Can you get todays date. And inturn add 10 to todays date"
 "content": "Who is known as father of the nation in India, also is there a similar figure for USA as well as UK"
 "content": "Who is known as father of the nation in India, Add 10 to double his year of birth and show me the results."
 "content": "How is the weather today in london."
 "content": "How is the weather today in london. Add 324 to todays temperature in celcius in london"
 "content": "How is the weather today in bengaluru. Add 324 to todays temperature in celcius in kochi"
 "content": "Add 324 to todays temperature in celcius in london"
 "content": "Add 324 to todays temperature in celcius in delhi"
 "content": "Add 324 to todays temperature in celcius in delhi. Dont forget to get todays weather info about delhi so that the temperature is valid"
 "content": "Add 324 to todays temperature in celcius in bengaluru. Dont forget to get todays weather info about bengaluru so that the temperature is valid. Use a free weather info site which doesnt require any api keys to get the info"
 "content": "Can you get the cutoff rank for all the deemed medical universities in India for UGNeet 25"
diff --git a/tools/server/public_simplechat/tooljs.mjs b/tools/server/public_simplechat/tooljs.mjs
new file mode 100644
index 0000000000000..a44333ca1b3e7
--- /dev/null
+++ b/tools/server/public_simplechat/tooljs.mjs
@@ -0,0 +1,98 @@
+//@ts-check
+// DANGER DANGER DANGER - Simple and Stupid - Use from a discardable VM only
+// Helpers to handle tools/functions calling wrt
+// * javascript interpreter
+// * simple arithmetic calculator
+// by Humans for All
+//
+
+
// The web worker on which tool/function call code is run; stays null
// until init() is called (tools.mjs passes in the worker it creates).
let gToolsWorker = /** @type{Worker} */(/** @type {unknown} */(null));
+
+
// OpenAI-style tool metadata describing the javascript interpreter tool;
// this object is advertised verbatim to the ai model (see tools.meta()).
let js_meta = {
    "type": "function",
    "function": {
        "name": "run_javascript_function_code",
        "description": "Runs given code using eval within a web worker context in a browser's javascript environment and returns the console.log outputs of the execution after few seconds",
        "parameters": {
            "type": "object",
            "properties": {
                "code": {
                    "type": "string",
                    "description": "The code that will be run using eval within a web worker in the browser's javascript interpreter environment."
                }
            },
            "required": ["code"]
        }
    }
}
+
+
/**
 * Implementation of the javascript interpreter logic. Minimal skeleton for now -
 * simply forwards the given code to the tools web worker for execution.
 * ALERT: the code runs with full access to the web worker environment
 * (and potentially beyond), so use only from a discardable VM.
 * @param {string} toolcallid
 * @param {string} toolname
 * @param {any} obj
 */
function js_run(toolcallid, toolname, obj) {
    const msg = { id: toolcallid, name: toolname, code: obj["code"] };
    gToolsWorker.postMessage(msg);
}
+
+
// OpenAI-style tool metadata describing the simple calculator tool;
// this object is advertised verbatim to the ai model (see tools.meta()).
let calc_meta = {
    "type": "function",
    "function": {
        "name": "simple_calculator",
        "description": "Calculates the provided arithmatic expression using console.log within a web worker of a browser's javascript interpreter environment and returns the output of the execution once it is done in few seconds",
        "parameters": {
            "type": "object",
            "properties": {
                "arithexpr":{
                    "type":"string",
                    "description":"The arithmatic expression that will be calculated by passing it to console.log of a browser's javascript interpreter."
                }
            },
            "required": ["arithexpr"]
        }
    }
}
+
+
/**
 * Implementation of the simple calculator logic. Minimal skeleton for now -
 * wraps the expression in a console.log and hands it to the tools web worker.
 * ALERT: the expression is interpolated unescaped into javascript code that
 * runs with full access to the web worker environment, so use only from a
 * discardable VM.
 * @param {string} toolcallid
 * @param {string} toolname
 * @param {any} obj
 */
function calc_run(toolcallid, toolname, obj) {
    const code = `console.log(${obj["arithexpr"]})`;
    gToolsWorker.postMessage({ id: toolcallid, name: toolname, code });
}
+
+
/**
 * Dispatch table for the tools implemented by this module - maps each
 * tool/function name to its handler, its metadata (advertised to the ai
 * model) and a placeholder for its last result.
 * @type {Object<string,Object>}
 */
export let tc_switch = {
    "run_javascript_function_code": {
        "handler": js_run,
        "meta": js_meta,
        "result": ""
    },
    "simple_calculator": {
        "handler": calc_run,
        "meta": calc_meta,
        "result": ""
    }
}
+
+
/**
 * Capture the web worker instance, which this module should subsequently
 * use for running tool/function call related code.
 * @param {Worker} toolsWorker
 */
export function init(toolsWorker) {
    gToolsWorker = toolsWorker;
}
diff --git a/tools/server/public_simplechat/tools.mjs b/tools/server/public_simplechat/tools.mjs
new file mode 100644
index 0000000000000..8c89e965258b4
--- /dev/null
+++ b/tools/server/public_simplechat/tools.mjs
@@ -0,0 +1,64 @@
+//@ts-check
+// DANGER DANGER DANGER - Simple and Stupid - Use from a discardable VM only
+// Helpers to handle tools/functions calling in a direct and dangerous way
+// by Humans for All
+//
+
+
+import * as tjs from './tooljs.mjs'
+
+
// Dedicated module web worker which actually runs tool/function call code.
let gToolsWorker = new Worker('./toolsworker.mjs', { type: 'module' });
/**
 * Combined tool-call dispatch table across the tool provider modules,
 * populated by init(); maps each tool/function name to its
 * handler/meta/result entries.
 * @type {Object<string,Object>}
 */
export let tc_switch = {}
+
/**
 * Hand the shared tools web worker to the tool provider module(s) and
 * merge their individual dispatch tables into the combined tc_switch.
 */
export function init() {
    tjs.init(gToolsWorker);
    Object.assign(tc_switch, tjs.tc_switch);
}
+
/**
 * Collect the metadata of all registered tools, in the shape expected
 * by the chat completions request's "tools" field.
 */
export function meta() {
    return Object.values(tc_switch).map((entry) => entry["meta"]);
}
+
/**
 * Setup the callback that will be called whenever a message is
 * received from the Tools Web Worker.
 * @param {(id: string, name: string, data: string) => void} cb
 */
export function setup(cb) {
    gToolsWorker.onmessage = (ev) => {
        const { id, name, data } = ev.data;
        cb(id, name, data);
    };
}
+
+
/**
 * Try call the specified tool/function call.
 * Returns undefined, if the call was placed successfully.
 * Else some appropriate error message will be returned - for an unknown
 * tool name, for malformed json args, or for a handler that threw.
 * @param {string} toolcallid - id assigned by the ai engine to this tool call
 * @param {string} toolname - name of the tool/function to run
 * @param {string} toolargs - json string with the tool's arguments
 * @returns {Promise<string | undefined>}
 */
export async function tool_call(toolcallid, toolname, toolargs) {
    // Direct own-key lookup replaces the previous linear scan over
    // tc_switch's keys with loose ==; Object.hasOwn also avoids matching
    // names inherited via the prototype chain (prototype-pollution safety).
    if (!Object.hasOwn(tc_switch, toolname)) {
        return `Unknown Tool/Function Call:${toolname}`;
    }
    try {
        // JSON.parse may throw on malformed args; treated same as a
        // handler exception, matching the original control flow.
        tc_switch[toolname]["handler"](toolcallid, toolname, JSON.parse(toolargs));
        return undefined;
    } catch (/** @type {any} */error) {
        return `Tool/Function call raised an exception:${error.name}:${error.message}`;
    }
}
diff --git a/tools/server/public_simplechat/toolsconsole.mjs b/tools/server/public_simplechat/toolsconsole.mjs
new file mode 100644
index 0000000000000..b372dc74ef329
--- /dev/null
+++ b/tools/server/public_simplechat/toolsconsole.mjs
@@ -0,0 +1,57 @@
+//@ts-check
+// Helpers to handle tools/functions calling wrt console
+// by Humans for All
+//
+
+
/** The redirected console.log's capture-data-space, appended to by console_trapped */
export let gConsoleStr = ""
/**
 * Maintain original console.log, when needed.
 * Stays null until console_redir() first saves the real console.log.
 * @type { {(...data: any[]): void} | null}
 */
let gOrigConsoleLog = null
+
+
/**
 * The trapping console.log replacement - stringifies every argument
 * (json for objects, String() otherwise), space-joins them and appends
 * the result as one line to the capture-data-space, instead of writing
 * anything to the real console.
 * @param {any[]} args
 */
export function console_trapped(...args) {
    const parts = [];
    for (const arg of args) {
        parts.push(typeof arg == 'object' ? JSON.stringify(arg) : String(arg));
    }
    gConsoleStr += parts.join(' ') + '\n';
}
+
/**
 * Redirect console.log into the capture-data-space.
 * Saves the original console.log once (erroring out if it appears to
 * have already been lost to the trap), installs console_trapped in its
 * place and clears any previously captured output.
 */
export function console_redir() {
    if (gOrigConsoleLog === null) {
        if (console.log === console_trapped) {
            throw new Error("ERRR:ToolsConsole:ReDir:Original Console.Log lost???");
        }
        gOrigConsoleLog = console.log;
    }
    gConsoleStr = "";
    console.log = console_trapped;
}
+
/**
 * Revert the redirected console.log to the original console.log, if possible.
 * A no-op when no redirection was ever set up; errors out if the saved
 * "original" turns out to be the trap itself.
 */
export function console_revert() {
    if (gOrigConsoleLog === null) {
        return;
    }
    if (gOrigConsoleLog === console_trapped) {
        throw new Error("ERRR:ToolsConsole:Revert:Original Console.Log lost???");
    }
    console.log = gOrigConsoleLog;
}
diff --git a/tools/server/public_simplechat/toolsworker.mjs b/tools/server/public_simplechat/toolsworker.mjs
new file mode 100644
index 0000000000000..590c45234be7b
--- /dev/null
+++ b/tools/server/public_simplechat/toolsworker.mjs
@@ -0,0 +1,25 @@
+//@ts-check
+// STILL DANGER DANGER DANGER - Simple and Stupid - Use from a discardable VM only
+// Helpers to handle tools/functions calling using web worker
+// by Humans for All
+//
+
+/**
+ * Expects to get a message with identifier name and code to run
+ * Posts message with identifier name and data captured from console.log outputs
+ */
+
+
+import * as tconsole from "./toolsconsole.mjs"
+
+
self.onmessage = function (ev) {
    // Capture console.log output while the tool code runs.
    tconsole.console_redir()
    try {
        // DANGER: runs model-provided code with full web worker privileges.
        eval(ev.data.code)
    } catch (/** @type {any} */error) {
        // Surface the failure through the same captured-console channel,
        // so it reaches the ai model like any other tool output.
        console.log(`\n\nTool/Function call "${ev.data.name}" raised an exception:${error.name}:${error.message}\n\n`)
    }
    tconsole.console_revert()
    // Hand the captured output back to the main thread, tagged with the
    // originating tool call id and name.
    // NOTE(review): output produced asynchronously by the eval'd code after
    // this point is not captured - confirm this is acceptable.
    self.postMessage({ id: ev.data.id, name: ev.data.name, data: tconsole.gConsoleStr})
}