From a07c11ffd5c2e1aa4197b7395f0292cd3a3b5f7f Mon Sep 17 00:00:00 2001
From: Corina Gum <>
Date: Wed, 27 Mar 2024 14:06:07 -0700
Subject: [PATCH] Begin mistral testing

---
 js/packages/teams-ai/src/models/LlamaModel.ts | 19 +++++++++++++++++--
 .../04.ai.g.LLAMA/devTools/teamsapptester     |  1 +
 js/samples/04.ai.g.LLAMA/src/index.ts         | 14 ++++++++------
 .../src/prompts/default/config.json           | 11 +++++++----
 .../src/prompts/default/skprompt.txt          |  2 +-
 5 files changed, 34 insertions(+), 13 deletions(-)
 create mode 120000 js/samples/04.ai.g.LLAMA/devTools/teamsapptester

diff --git a/js/packages/teams-ai/src/models/LlamaModel.ts b/js/packages/teams-ai/src/models/LlamaModel.ts
index 734fc13fb..537a1b3d6 100644
--- a/js/packages/teams-ai/src/models/LlamaModel.ts
+++ b/js/packages/teams-ai/src/models/LlamaModel.ts
@@ -5,10 +5,13 @@
 import { PromptCompletionModel, PromptResponse } from '../models';
 import { Memory } from '../MemoryFork';
 import { Message, PromptFunctions, PromptTemplate } from '../prompts';
 import { Tokenizer } from '../tokenizers';
+import { Colorize } from '../internals/Colorize';
+import colorizeJson from 'json-colorizer';
 
 export interface LlamaModelOptions {
     apiKey: string;
     endpoint: string;
+    logRequests: boolean;
 }
 export class LlamaModel implements PromptCompletionModel {
@@ -44,18 +47,30 @@ export class LlamaModel implements PromptCompletionModel {
         }
 
         let last: Message | undefined = result.output[result.output.length - 1];
         if (last?.role !== 'user') {
             last = undefined;
         }
         let res;
+        if (this.options.logRequests) {
+            console.log(Colorize.title('CHAT PROMPT:'));
+            console.log(Colorize.output(result.output));
+            console.log(`parameters: ${colorizeJson(JSON.stringify(template.config.completion))}`);
+        }
+
         try {
-            res = await this._httpClient.post<{ output: string }>(this.options.endpoint, {
+            res = await this._httpClient.post(this.options.endpoint, {
                 input_data: {
                     input_string: result.output,
                     parameters: template.config.completion
                 }
             });
+            if (this.options.logRequests) {
+                console.log(Colorize.title('CHAT RESPONSE:'));
+                console.log(Colorize.value('status', res.status));
+                console.log(Colorize.output(res.data[0]));
+                console.log(Colorize.title(`~~~~~~~~~`));
+            }
         } catch (err) {
             console.error(err);
             throw err;
         }
@@ -66,7 +81,7 @@
             input: last,
             message: {
                 role: 'assistant',
-                content: res!.data.output
+                content: res!.data[0]
             }
         };
     }
diff --git a/js/samples/04.ai.g.LLAMA/devTools/teamsapptester b/js/samples/04.ai.g.LLAMA/devTools/teamsapptester
new file mode 120000
index 000000000..9e46074d7
--- /dev/null
+++ b/js/samples/04.ai.g.LLAMA/devTools/teamsapptester
@@ -0,0 +1 @@
+/Users/corina/.fx/bin/testTool/0.1.0-beta.5
\ No newline at end of file
diff --git a/js/samples/04.ai.g.LLAMA/src/index.ts b/js/samples/04.ai.g.LLAMA/src/index.ts
index ad8367b72..195ef4114 100644
--- a/js/samples/04.ai.g.LLAMA/src/index.ts
+++ b/js/samples/04.ai.g.LLAMA/src/index.ts
@@ -74,14 +74,15 @@ interface ConversationState {
 }
 type ApplicationTurnState = TurnState;
-if (!process.env.LLAMA_API_KEY && !process.env.LLAMA_ENDPOINT) {
-    throw new Error('Missing environment variables - please check that LLAMA_API_KEY and LLAMA_ENDPOINT are set.');
+if (!process.env.MISTRAL_API_KEY || !process.env.MISTRAL_ENDPOINT) {
+    throw new Error('Missing environment variables - please check that MISTRAL_API_KEY and MISTRAL_ENDPOINT are set.');
 }
 
 // Create AI components
 const model = new LlamaModel({
-    // Llama Support
-    apiKey: process.env.LLAMA_API_KEY!,
-    endpoint: process.env.LLAMA_ENDPOINT!
+    // Mistral support
+    apiKey: process.env.MISTRAL_API_KEY!,
+    endpoint: process.env.MISTRAL_ENDPOINT!,
+    logRequests: true
 });
 
 const prompts = new PromptManager({
@@ -99,7 +100,8 @@
 const storage = new MemoryStorage();
 const app = new Application({
     storage,
     ai: {
-        planner
+        planner,
+        allow_looping: false
     }
 });
diff --git a/js/samples/04.ai.g.LLAMA/src/prompts/default/config.json b/js/samples/04.ai.g.LLAMA/src/prompts/default/config.json
index b956cc1b5..65f1917e6 100644
--- a/js/samples/04.ai.g.LLAMA/src/prompts/default/config.json
+++ b/js/samples/04.ai.g.LLAMA/src/prompts/default/config.json
@@ -3,11 +3,14 @@
     "description": "A bot that can turn the lights on and off",
     "type": "completion",
     "completion": {
-        "model": "llama-2-7b-chat-18",
-        "temperature": 0.2,
+        "model": "mistralai-Mistral-7B-Instruct-v01",
+        "temperature": 0.0,
         "top_p": 0.9,
-        "do_sample": true,
-        "max_new_tokens": 500
+        "do_sample": false,
+        "response_format": {
+            "type": "json_object"
+        },
+        "return_full_text": false
     },
     "augmentation": {
         "augmentation_type": "sequence"
diff --git a/js/samples/04.ai.g.LLAMA/src/prompts/default/skprompt.txt b/js/samples/04.ai.g.LLAMA/src/prompts/default/skprompt.txt
index 5cdceb13b..82b6dc119 100644
--- a/js/samples/04.ai.g.LLAMA/src/prompts/default/skprompt.txt
+++ b/js/samples/04.ai.g.LLAMA/src/prompts/default/skprompt.txt
@@ -1,5 +1,5 @@
 The following is a conversation with an AI assistant.
-The assistant can turn a light on or off.
+The assistant can turn a light on or off. The assistant can only manage lights. The assistant can only discuss lights.
 
 context:
 The lights are currently {{getLightStatus}}.
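
Reviewer note: below is a minimal sketch of the endpoint contract the patched
LlamaModel assumes, for anyone testing against their own Mistral deployment.
The request body shape (input_data.input_string / parameters) and reading the
completion from res.data[0] are taken straight from the diff above; the
bearer-token auth header, the response being an array of strings, and the
completeChat helper itself are assumptions, not part of the patch.

    import axios from 'axios';

    type ChatMessage = { role: 'system' | 'user' | 'assistant'; content: string };

    // Hypothetical standalone mirror of LlamaModel's HTTP call.
    async function completeChat(endpoint: string, apiKey: string, messages: ChatMessage[]): Promise<string> {
        const res = await axios.post(
            endpoint,
            {
                input_data: {
                    input_string: messages, // chat history (result.output in the diff)
                    parameters: {
                        // mirrors "completion" in prompts/default/config.json
                        temperature: 0.0,
                        top_p: 0.9,
                        do_sample: false,
                        return_full_text: false
                    }
                }
            },
            // Assumed auth scheme for an Azure ML-style scoring endpoint.
            { headers: { Authorization: `Bearer ${apiKey}` } }
        );
        // The patch reads the assistant's reply as the first element of the payload.
        return res.data[0];
    }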