Skip to content

Commit

Permalink
"Small" changes (#213)
Browse files Browse the repository at this point in the history
* Remove notification dots

* Add batch uids to response objects.

* Regroup responses by batch ids in inspectors. Add batch ids to resp objs. Update examples.

* Bug fix: clear RF state first before loading a flow

* Add random sample toggle to Tabular Data node

* Make sample UI loc conditional on num cols and fit nicer into whitespace

* Adds 'settings template vars' to parametrize on model settings.

* Typecast settings vars params

* Rebuild app and update version
  • Loading branch information
ianarawjo committed Jan 20, 2024
1 parent 3d15bc9 commit 7e1f436
Show file tree
Hide file tree
Showing 35 changed files with 42,681 additions and 13,021 deletions.
3,734 changes: 2,744 additions & 990 deletions chainforge/examples/basic-comparison.cforge

Large diffs are not rendered by default.

1,650 changes: 1,331 additions & 319 deletions chainforge/examples/basic-function-calls.cforge

Large diffs are not rendered by default.

3,006 changes: 2,053 additions & 953 deletions chainforge/examples/basic-math.cforge

Large diffs are not rendered by default.

2,084 changes: 1,766 additions & 318 deletions chainforge/examples/chaining-prompts.cforge

Large diffs are not rendered by default.

26,802 changes: 21,709 additions & 5,093 deletions chainforge/examples/comparing-system-msg.cforge

Large diffs are not rendered by default.

17,951 changes: 12,703 additions & 5,248 deletions chainforge/examples/prompt-injection-test.cforge

Large diffs are not rendered by default.

12 changes: 6 additions & 6 deletions chainforge/react-server/build/asset-manifest.json
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
{
"files": {
"main.css": "/static/css/main.01603dff.css",
"main.js": "/static/js/main.dd40466e.js",
"main.css": "/static/css/main.c0f80104.css",
"main.js": "/static/js/main.8f9ef9df.js",
"static/js/787.4c72bb55.chunk.js": "/static/js/787.4c72bb55.chunk.js",
"index.html": "/index.html",
"main.01603dff.css.map": "/static/css/main.01603dff.css.map",
"main.dd40466e.js.map": "/static/js/main.dd40466e.js.map",
"main.c0f80104.css.map": "/static/css/main.c0f80104.css.map",
"main.8f9ef9df.js.map": "/static/js/main.8f9ef9df.js.map",
"787.4c72bb55.chunk.js.map": "/static/js/787.4c72bb55.chunk.js.map"
},
"entrypoints": [
"static/css/main.01603dff.css",
"static/js/main.dd40466e.js"
"static/css/main.c0f80104.css",
"static/js/main.8f9ef9df.js"
]
}
2 changes: 1 addition & 1 deletion chainforge/react-server/build/index.html
Original file line number Diff line number Diff line change
@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><script async src="https://www.googletagmanager.com/gtag/js?id=G-RN3FDBLMCR"></script><script>function gtag(){dataLayer.push(arguments)}window.dataLayer=window.dataLayer||[],gtag("js",new Date),gtag("config","G-RN3FDBLMCR")</script><link rel="icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="A visual programming environment for prompt engineering"/><link rel="apple-touch-icon" href="/logo192.png"/><link rel="manifest" href="/manifest.json"/><title>ChainForge</title><script defer="defer" src="/static/js/main.dd40466e.js"></script><link href="/static/css/main.01603dff.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><script async src="https://www.googletagmanager.com/gtag/js?id=G-RN3FDBLMCR"></script><script>function gtag(){dataLayer.push(arguments)}window.dataLayer=window.dataLayer||[],gtag("js",new Date),gtag("config","G-RN3FDBLMCR")</script><link rel="icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="A visual programming environment for prompt engineering"/><link rel="apple-touch-icon" href="/logo192.png"/><link rel="manifest" href="/manifest.json"/><title>ChainForge</title><script defer="defer" src="/static/js/main.8f9ef9df.js"></script><link href="/static/css/main.c0f80104.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

This file was deleted.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

34 changes: 23 additions & 11 deletions chainforge/react-server/src/App.js
Original file line number Diff line number Diff line change
Expand Up @@ -340,11 +340,23 @@ const App = () => {
rf_inst.setViewport({x:0, y:0, zoom:1});
}
resetLLMColors();
setNodes(flow.nodes || []);
setEdges(flow.edges || []);

// Save flow that user loaded to autosave cache, in case they refresh the browser
StorageCache.saveToLocalStorage('chainforge-flow', flow);
// First, clear the ReactFlow state entirely
// NOTE: We need to do this so it forgets any node/edge ids, which might have cross-over in the loaded flow.
setNodes([]);
setEdges([]);

// After a delay, load in the new state.
setTimeout(() => {
setNodes(flow.nodes || []);
setEdges(flow.edges || []);

// Save flow that user loaded to autosave cache, in case they refresh the browser
StorageCache.saveToLocalStorage('chainforge-flow', flow);

// Cancel loading spinner
setIsLoading(false);
}, 10);

// Start auto-saving, if it's not already enabled
if (rf_inst) initAutosaving(rf_inst);
Expand Down Expand Up @@ -403,6 +415,8 @@ const App = () => {
const importFlowFromJSON = useCallback((flowJSON, rf_inst) => {
const rf = rf_inst || rfInstance;

setIsLoading(true);

// Detect if there's no cache data
if (!flowJSON.cache) {
// Support for loading old flows w/o cache data:
Expand Down Expand Up @@ -436,6 +450,9 @@ const App = () => {
// Handle file selection
input.addEventListener("change", function(event) {

// Start loading spinner
setIsLoading(false);

const file = event.target.files[0];
const reader = new FileReader();

Expand Down Expand Up @@ -463,12 +480,11 @@ const App = () => {

// Downloads the selected OpenAI eval file (preconverted to a .cforge flow)
const importFlowFromOpenAIEval = (evalname) => {
setIsLoading(true);

fetch_from_backend('fetchOpenAIEval', {
name: evalname,
}, handleError).then(function(json) {
// Close the loading modal
setIsLoading(false);

// Detect any issues with the response
if (!json) {
handleError('Request was sent and received by backend server, but there was no response.');
Expand Down Expand Up @@ -498,9 +514,6 @@ const App = () => {
fetch_from_backend('fetchExampleFlow', {
'name': name,
}, handleError).then(function(json) {
// Close the loading modal
setIsLoading(false);

if (!json)
throw new Error('Request to fetch example flow was sent to backend server, but there was no response.');
else if (json.error || !json.data)
Expand Down Expand Up @@ -679,7 +692,6 @@ const App = () => {
try {
const cforge_json = JSON.parse(LZString.decompressFromUTF16(response));
importFlowFromJSON(cforge_json, rf_inst);
setIsLoading(false);
} catch (err) {
handleError(err);
}
Expand Down
5 changes: 2 additions & 3 deletions chainforge/react-server/src/CodeEvaluatorNode.js
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,6 @@ function process(response) {
*/
const CodeEvaluatorNode = ({ data, id, type: node_type }) => {

const inputEdgesForNode = useStore((state) => state.inputEdgesForNode);
const pullInputData = useStore((state) => state.pullInputData);
const pingOutputNodes = useStore((state) => state.pingOutputNodes);
const setDataPropsForNode = useStore((state) => state.setDataPropsForNode);
Expand Down Expand Up @@ -288,7 +287,8 @@ const CodeEvaluatorNode = ({ data, id, type: node_type }) => {
prompt: resp_obj['prompt'],
fill_history: resp_obj['vars'],
metavars: resp_obj['metavars'] || {},
llm: resp_obj['llm'] };
llm: resp_obj['llm'],
batch_id: resp_obj['uid'] };

// Carry over any chat history
if (resp_obj['chat_history'])
Expand Down Expand Up @@ -449,7 +449,6 @@ const CodeEvaluatorNode = ({ data, id, type: node_type }) => {
{ lastRunSuccess && lastResponses && lastResponses.length > 0 ?
(<InspectFooter label={<>Inspect results&nbsp;<IconSearch size='12pt'/></>}
onClick={showResponseInspector}
showNotificationDot={uninspectedResponses}
isDrawerOpen={showDrawer}
showDrawerButton={true}
onDrawerClick={() => {
Expand Down
6 changes: 5 additions & 1 deletion chainforge/react-server/src/JoinNode.js
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import React, { useState, useEffect, useCallback } from 'react';
import { Handle } from 'reactflow';
import { v4 as uuid } from 'uuid';
import useStore from './store';
import BaseNode from './BaseNode';
import NodeLabel from './NodeLabelComponent';
Expand Down Expand Up @@ -187,6 +188,7 @@ const JoinNode = ({ data, id }) => {
fill_history: isMetavar ? {} : vars,
metavars: isMetavar ? vars : {},
llm: llm,
batch_id: uuid(),
// NOTE: We lose all other metadata here, because we could've joined across other vars or metavars values.
};
});
Expand All @@ -199,6 +201,7 @@ const JoinNode = ({ data, id }) => {
fill_history: {},
metavars: {},
llm: llm,
batch_id: uuid(),
});
}

Expand All @@ -219,6 +222,7 @@ const JoinNode = ({ data, id }) => {
fill_history: p.fill_history,
llm: "__LLM_key" in p.metavars ? llm_lookup[p.metavars['__LLM_key']] : undefined,
metavars: removeLLMTagFromMetadata(p.metavars),
batch_id: uuid(),
}));

// If there's multiple LLMs and groupByLLM is 'within', we need to
Expand Down Expand Up @@ -248,7 +252,7 @@ const JoinNode = ({ data, id }) => {

// If there is exactly 1 LLM and it's present across all inputs, keep track of it:
if (numLLMs === 1 && resp_objs.every((r) => r.llm !== undefined))
joined_texts = {text: joined_texts, fill_history: {}, llm: resp_objs[0].llm};
joined_texts = {text: joined_texts, fill_history: {}, llm: resp_objs[0].llm, batch_id: uuid()};

setJoinedTexts([joined_texts]);
setDataPropsForNode(id, { fields: [joined_texts] });
Expand Down
1 change: 0 additions & 1 deletion chainforge/react-server/src/LLMEvalNode.js
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,6 @@ const LLMEvaluatorNode = ({ data, id }) => {
{ lastResponses && lastResponses.length > 0 ?
(<InspectFooter label={<>Inspect scores&nbsp;<IconSearch size='12pt'/></>}
onClick={showResponseInspector}
showNotificationDot={uninspectedResponses}
isDrawerOpen={showDrawer}
showDrawerButton={true}
onDrawerClick={() => {
Expand Down
28 changes: 18 additions & 10 deletions chainforge/react-server/src/LLMResponseInspector.js
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ import { useDisclosure, useToggle } from '@mantine/hooks';
import { IconTable, IconLayoutList, IconLetterCaseToggle, IconFilter } from '@tabler/icons-react';
import * as XLSX from 'xlsx';
import useStore from './store';
import { filterDict, truncStr, groupResponsesBy } from './backend/utils';
import { transformDict, truncStr, groupResponsesBy, batchResponsesByUID } from './backend/utils';

// Helper funcs
const countResponsesBy = (responses, keyFunc) => {
Expand Down Expand Up @@ -114,7 +114,7 @@ export const exportToExcel = (jsonResponses, filename) => {
const vars = res_obj.vars;
const eval_res_items = res_obj.eval_res ? res_obj.eval_res.items : null;
return res_obj.responses.map((r, r_idx) => {
let row = { 'LLM': llm, 'Prompt': prompt, 'Response': r, 'Response Batch Id': res_obj_idx };
let row = { 'LLM': llm, 'Prompt': prompt, 'Response': r, 'Response Batch Id': (res_obj.uid ?? res_obj_idx) };
Object.keys(vars).forEach(varname => {
row[`Param: ${varname}`] = vars[varname];
});
Expand Down Expand Up @@ -172,13 +172,20 @@ const LLMResponseInspector = ({ jsonResponses, wideFormat }) => {
const [caseSensitive, toggleCaseSensitivity] = useToggle([false, true]);
const [filterBySearchValue, toggleFilterBySearchValue] = useToggle([true, false]);
const [numMatches, setNumMatches] = useState(-1);

// Count number of response texts whenever jsonResponses changes
const numResponses = useMemo(() => {
if (jsonResponses && Array.isArray(jsonResponses) && jsonResponses.length > 0)
return jsonResponses.reduce((acc, resp_obj) => (acc + resp_obj["responses"].length), 0);
else
return 0;
}, [jsonResponses]);

// Regroup input responses by batch UID, whenever jsonResponses changes
const batchedResponses = useMemo(() =>
jsonResponses ? batchResponsesByUID(jsonResponses) : []
, [jsonResponses]);

// The var name to use for columns in the table view
const [tableColVar, setTableColVar] = useState("LLM");
const [userSelectedTableCol, setUserSelectedTableCol] = useState(false);
Expand All @@ -192,14 +199,14 @@ const LLMResponseInspector = ({ jsonResponses, wideFormat }) => {

// Update the visualization whenever the jsonResponses or MultiSelect values change:
useEffect(() => {
if (!jsonResponses || (Array.isArray(jsonResponses) && jsonResponses.length === 0))
if (!batchedResponses || (Array.isArray(batchedResponses) && batchedResponses.length === 0))
return;

// Find all vars in responses
let found_vars = new Set();
let found_metavars = new Set();
let found_llms = new Set();
jsonResponses.forEach(res_obj => {
batchedResponses.forEach(res_obj => {
Object.keys(res_obj.vars).forEach(v => found_vars.add(v));
Object.keys(res_obj.metavars).forEach(v => found_metavars.add(v));
found_llms.add(getLLMName(res_obj));
Expand All @@ -209,7 +216,7 @@ const LLMResponseInspector = ({ jsonResponses, wideFormat }) => {
found_llms = Array.from(found_llms);

// Whether there's some evaluation scores in the responses
const contains_eval_res = jsonResponses.some(res_obj => res_obj.eval_res !== undefined);
const contains_eval_res = batchedResponses.some(res_obj => res_obj.eval_res !== undefined);
setShowEvalScoreOptions(contains_eval_res);

// Set the variables accessible in the MultiSelect for 'group by'
Expand Down Expand Up @@ -240,7 +247,8 @@ const LLMResponseInspector = ({ jsonResponses, wideFormat }) => {
return; // useEffect will replot with the new values
}

let responses = jsonResponses;
// Regroup responses by batch ID
let responses = batchedResponses;
let numResponsesDisplayed = 0;
const selected_vars = multiSelectValue;
const empty_cell_text = searchValue.length > 0 ? "(no match)" : "(no data)";
Expand Down Expand Up @@ -312,7 +320,7 @@ const LLMResponseInspector = ({ jsonResponses, wideFormat }) => {
// At the deepest level, there may still be some vars left over. We want to display these
// as tags, too, so we need to display only the ones that weren't 'eaten' during the recursive call:
// (e.g., the vars that weren't part of the initial 'varnames' list that form the groupings)
const unused_vars = filterDict(res_obj.vars, v => !eatenvars.includes(v));
const unused_vars = transformDict(res_obj.vars, v => !eatenvars.includes(v));
const var_tags = Object.keys(unused_vars).map((varname) => {
const v = truncStr(unused_vars[varname].trim(), wideFormat ? 72 : 18);
return (<div key={varname} className="response-var-inline" >
Expand Down Expand Up @@ -342,7 +350,7 @@ const LLMResponseInspector = ({ jsonResponses, wideFormat }) => {
// Generate a table, with default columns for: input vars, LLMs queried
// First get column names as input vars + LLMs:
let var_cols, colnames, getColVal, found_sel_var_vals;
let metavar_cols = found_metavars;
let metavar_cols = []; // found_metavars; -- Disabling this functionality for now, since it is usually annoying.
if (tableColVar === 'LLM') {
var_cols = found_vars;
getColVal = getLLMName;
Expand Down Expand Up @@ -486,7 +494,7 @@ const LLMResponseInspector = ({ jsonResponses, wideFormat }) => {

setNumMatches(numResponsesDisplayed);

}, [multiSelectValue, jsonResponses, wideFormat, viewFormat, tableColVar, onlyShowScores, searchValue, caseSensitive, filterBySearchValue]);
}, [multiSelectValue, batchedResponses, wideFormat, viewFormat, tableColVar, onlyShowScores, searchValue, caseSensitive, filterBySearchValue]);

// When the user clicks an item in the drop-down,
// we want to autoclose the multiselect drop-down:
Expand Down
65 changes: 60 additions & 5 deletions chainforge/react-server/src/ModelSettingSchemas.js
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@
* Descriptions of OpenAI model parameters copied from OpenAI's official chat completions documentation: https://platform.openai.com/docs/models/model-endpoint-compatibility
*/

import { RATE_LIMITS } from "./backend/models";
import { filterDict } from './backend/utils';
import { LLMProvider, RATE_LIMITS, getProvider } from "./backend/models";
import { transformDict } from './backend/utils';
import useStore from "./store";

const UI_SUBMIT_BUTTON_SPEC = {
Expand Down Expand Up @@ -46,7 +46,7 @@ const ChatGPTSettings = {
},
"system_msg": {
"type": "string",
"title": "System Message (chat models only)",
"title": "system_msg",
"description": "Many conversations begin with a system message to gently instruct the assistant. By default, ChainForge includes the suggested 'You are a helpful assistant.'",
"default": "You are a helpful assistant.",
"allow_empty_str": true,
Expand Down Expand Up @@ -616,7 +616,7 @@ const AzureOpenAISettings = {
"description": "Used when calling the OpenAI API through Azure services. Normally you don't need to change this setting.",
"default": "2023-05-15"
},
...filterDict(ChatGPTSettings.schema.properties, (key) => key !== 'model'),
...transformDict(ChatGPTSettings.schema.properties, (key) => key !== 'model'),
}
},
uiSchema: {
Expand Down Expand Up @@ -1028,7 +1028,7 @@ const OllamaSettings = {
},
system_msg: {
type: "string",
title: "System Message (chat models only)",
title: "system_msg (chat models only)",
description: "Enter your system message here. Note that the type of model must be set to 'chat' for this to be passed.",
default: "",
allow_empty_str: true,
Expand Down Expand Up @@ -1110,8 +1110,63 @@ export let ModelSettings = {
'hf': HuggingFaceTextInferenceSettings,
"luminous-base": AlephAlphaLuminousSettings,
"ollama": OllamaSettings,
};


export function getSettingsSchemaForLLM(llm) {
  // Each known provider maps to exactly one settings schema object.
  const schemaByProvider = {
    [LLMProvider.OpenAI]: GPT4Settings,
    [LLMProvider.Anthropic]: ClaudeSettings,
    [LLMProvider.Google]: PaLM2Settings,
    [LLMProvider.Dalai]: DalaiModelSettings,
    [LLMProvider.Azure_OpenAI]: AzureOpenAISettings,
    [LLMProvider.HuggingFace]: HuggingFaceTextInferenceSettings,
    [LLMProvider.Aleph_Alpha]: AlephAlphaLuminousSettings,
    [LLMProvider.Ollama]: OllamaSettings,
  };

  const provider = getProvider(llm);

  // Custom providers register schemas under the model name itself.
  if (provider === LLMProvider.Custom)
    return ModelSettings[llm];

  if (provider in schemaByProvider)
    return schemaByProvider[provider];

  console.error(`Could not find provider for llm ${llm}`);
  return {};
}

/**
 * Processes settings values to the correct types according to schema for the model 'llm'.
 * @param {*} settings_dict A dict of form setting_name: value (string: string)
 * @param {*} llm A string of the name of the model to query.
 * @returns {object} A clone of `settings_dict` with each value typecast (or postprocessed)
 *                   per the model's settings schema; keys absent from the schema pass through unchanged.
 */
export function typecastSettingsDict(settings_dict, llm) {
  const settings = getSettingsSchemaForLLM(llm);
  const schema = settings?.schema?.properties ?? {};
  const postprocessors = settings?.postprocessors ?? {};

  // Return a clone of settings dict but with its values correctly typecast and postprocessed
  return transformDict(settings_dict, undefined, undefined, (key, val) => {
    if (key in schema) {
      // A postprocessor, if present, fully determines the resulting type:
      if (key in postprocessors)
        return postprocessors[key](val);

      // Otherwise, use the schema's declared 'type' to typecast it:
      const typeof_setting = schema[key].type ?? "string";
      if (typeof_setting === "number") // process numbers (floats)
        return parseFloat(val);
      else if (typeof_setting === "integer") // process integers
        return parseInt(val, 10); // explicit radix: avoid implementation-defined parsing of "0x"/"0" prefixes
      else if (typeof_setting === "boolean") // process booleans
        return String(val).trim().toLowerCase() === "true"; // String() guards against non-string inputs throwing on .trim()
    }
    return val; // process strings
  });
}

/**
* Add new model provider to the AvailableLLMs list. Also adds the respective ModelSettings schema and rate limit.
* @param {*} name The name of the provider, to use in the dropdown menu and default name. Must be unique.
Expand Down
Loading

0 comments on commit 7e1f436

Please sign in to comment.