Commit

format
SchneeHertz committed Aug 18, 2023
1 parent b7f3831 commit 4ff5d04
Showing 10 changed files with 174 additions and 177 deletions.
108 changes: 54 additions & 54 deletions index.js
@@ -13,20 +13,20 @@ const { getSpeechText } = require('./modules/whisper.js')
const { ttsPromise } = require('./modules/edge-tts.js')
const { openaiChatStream, openaiEmbedding } = require('./modules/common.js')
const { functionAction, functionInfo, functionList } = require('./modules/functions.js')
const {config: {
const { config: {
DEFAULT_MODEL,
ADMIN_NAME, AI_NAME,
systemPrompt
}} = require('./utils/loadConfig.js')
} } = require('./utils/loadConfig.js')

const logFile = fs.createWriteStream(path.join(LOG_PATH, `log-${new Date().toLocaleString('zh-CN').replace(/[\/:]/gi, '-')}.txt`), {flags: 'w'})
const messageLog = (message)=>{
const logFile = fs.createWriteStream(path.join(LOG_PATH, `log-${new Date().toLocaleString('zh-CN').replace(/[\/:]/gi, '-')}.txt`), { flags: 'w' })
const messageLog = (message) => {
logFile.write(format(new Date().toLocaleString('zh-CN'), JSON.stringify(message)) + '\n')
}
const messageSend = (message)=>{
const messageSend = (message) => {
mainWindow.webContents.send('send-message', message)
}
const messageLogAndSend = (message)=>{
const messageLogAndSend = (message) => {
messageLog(message)
messageSend(message)
}
@@ -42,7 +42,7 @@ const STATUS = {
let speakTextList = []

let mainWindow
function createWindow () {
function createWindow() {
const win = new BrowserWindow({
width: 960,
height: 512,
@@ -58,7 +58,7 @@ function createWindow () {
win.loadURL('http://localhost:5173')
}
win.setMenuBarVisibility(false)
win.webContents.on('did-finish-load', ()=>{
win.webContents.on('did-finish-load', () => {
let name = require('./package.json').name
let version = require('./package.json').version
win.setTitle(name + ' ' + version)
@@ -70,28 +70,28 @@ function createWindow () {
}

app.whenReady()
.then(async ()=>{
const memorydb = await lancedb.connect(path.join(STORE_PATH, 'memorydb'))
const embedding = {
sourceColumn:'text',
embed: async (batch)=>{
let result = []
for (let text of batch) {
result.push(await openaiEmbedding({input: text}))
.then(async () => {
const memorydb = await lancedb.connect(path.join(STORE_PATH, 'memorydb'))
const embedding = {
sourceColumn: 'text',
embed: async (batch) => {
let result = []
for (let text of batch) {
result.push(await openaiEmbedding({ input: text }))
}
return result
}
return result
}
}
try {
memoryTable = await memorydb.openTable('memory', embedding)
} catch {
try {
memoryTable = await memorydb.createTable('memory', [{'text': 'Hello world!'}], embedding)
} catch {}
}
mainWindow = createWindow()
setInterval(()=>mainWindow.webContents.send('send-status', STATUS), 1000)
})
memoryTable = await memorydb.openTable('memory', embedding)
} catch {
try {
memoryTable = await memorydb.createTable('memory', [{ 'text': 'Hello world!' }], embedding)
} catch { }
}
mainWindow = createWindow()
setInterval(() => mainWindow.webContents.send('send-status', STATUS), 1000)
})
app.on('activate', () => {
if (BrowserWindow.getAllWindows().length === 0) {
mainWindow = createWindow()
@@ -111,7 +111,7 @@ app.on('window-all-closed', () => {
* @param {string} options.preAudioPath - The path to the pre-recorded audio prompt.
* @return {Promise} A promise that resolves when the audio prompts have been played successfully.
*/
const speakPrompt = async ({text, preAudioPath}) => {
const speakPrompt = async ({ text, preAudioPath }) => {
try {
let nextAudioPath = path.join(AUDIO_PATH, `${nanoid()}.mp3`)
if (text) {
@@ -150,22 +150,22 @@ const resolveSpeakTextList = async (preAudioPath) => {
if (speakTextList.length > 0) {
let { text, triggerRecord } = speakTextList.shift()
if (triggerRecord) {
await speakPrompt({preAudioPath})
await speakPrompt({ preAudioPath })
triggerSpeech()
setTimeout(resolveSpeakTextList, 1000)
} else {
speakPrompt({text, preAudioPath})
speakPrompt({ text, preAudioPath })
}
} else {
speakPrompt({preAudioPath})
speakPrompt({ preAudioPath })
}
} else if (speakTextList.length > 0) {
let { text, triggerRecord } = speakTextList.shift()
if (triggerRecord) {
triggerSpeech()
setTimeout(resolveSpeakTextList, 1000)
} else {
speakPrompt({text})
speakPrompt({ text })
}
} else {
setTimeout(resolveSpeakTextList, 1000)
@@ -182,18 +182,18 @@ resolveSpeakTextList()
* @param {Object} options.triggerRecord - The trigger record object.
* @return {Promise<void>} - A promise that resolves with the generated response.
*/
const resloveAdminPrompt = async ({prompt, triggerRecord})=> {
const resloveAdminPrompt = async ({ prompt, triggerRecord }) => {
let from = triggerRecord ? `(${AI_NAME})` : AI_NAME
let history = getStore('history')
let messages = [
{role: 'system', content: systemPrompt},
{role: 'user', content: `我的名字是${ADMIN_NAME}`},
{role: 'assistant', content: `你好, ${ADMIN_NAME}`},
{ role: 'system', content: systemPrompt },
{ role: 'user', content: `我的名字是${ADMIN_NAME}` },
{ role: 'assistant', content: `你好, ${ADMIN_NAME}` },
..._.takeRight(history, 12),
{role: 'user', content: prompt}
{ role: 'user', content: prompt }
]

history.push({role: 'user', content: prompt})
history.push({ role: 'user', content: prompt })
history = _.takeRight(history, 50)
setStore('history', history)

@@ -206,7 +206,7 @@ const resloveAdminPrompt = async ({prompt, triggerRecord})=> {
let resArgument = ''

try {
for await (const {token, f_token} of openaiChatStream({
for await (const { token, f_token } of openaiChatStream({
model: DEFAULT_MODEL,
messages,
functions: functionInfo,
@@ -238,7 +238,7 @@ const resloveAdminPrompt = async ({prompt, triggerRecord})=> {
}
}
}
let {name, arguments: arg} = f_token
let { name, arguments: arg } = f_token
if (name) resFunction = name
if (arg) resArgument += arg
}
@@ -253,7 +253,7 @@ const resloveAdminPrompt = async ({prompt, triggerRecord})=> {
try {
switch (resFunction) {
case 'getHistoricalConversationContent':
functionCallResult = await functionList[resFunction](_.assign({dbTable: memoryTable}, JSON.parse(resArgument)))
functionCallResult = await functionList[resFunction](_.assign({ dbTable: memoryTable }, JSON.parse(resArgument)))
break
default:
functionCallResult = await functionList[resFunction](JSON.parse(resArgument))
@@ -264,16 +264,16 @@ const resloveAdminPrompt = async ({prompt, triggerRecord})=> {
functionCallResult = ''
}
let functionCalling = [
{role: "assistant", content: null, function_call: {name: resFunction, arguments: resArgument}},
{role: "function", name: resFunction, content: functionCallResult}
{ role: "assistant", content: null, function_call: { name: resFunction, arguments: resArgument } },
{ role: "function", name: resFunction, content: functionCallResult }
]
messages.push(...functionCalling)
history.push(...functionCalling)
history = _.takeRight(history, 50)
setStore('history', history)
if (functionCallResult) console.log(functionCalling)

for await (const {token} of openaiChatStream({
for await (const { token } of openaiChatStream({
model: DEFAULT_MODEL,
messages,
})) {
@@ -320,10 +320,10 @@ const resloveAdminPrompt = async ({prompt, triggerRecord})=> {
from,
text: resText
})
history.push({role: 'assistant', content: resText})
history.push({ role: 'assistant', content: resText })
history = _.takeRight(history, 50)
setStore('history', history)
memoryTable.add([{text: resText}])
memoryTable.add([{ text: resText }])
if (triggerRecord) {
let speakIndex = STATUS.speakIndex
STATUS.isSpeechTalk += 1
@@ -344,10 +344,10 @@ const resloveAdminPrompt = async ({prompt, triggerRecord})=> {
*
* @return {Promise<void>} Returns a promise that resolves when the function is complete.
*/
const triggerSpeech = async ()=>{
const triggerSpeech = async () => {
if (STATUS.isSpeechTalk) {
STATUS.isRecording = true
mainWindow.setProgressBar(100, {mode: 'indeterminate'})
mainWindow.setProgressBar(100, { mode: 'indeterminate' })
let adminTalk = await getSpeechText()
console.log(adminTalk)
STATUS.isRecording = false
@@ -357,20 +357,20 @@ const triggerSpeech = async ()=>{
from: `(${ADMIN_NAME})`,
text: adminTalk
})
resloveAdminPrompt({prompt: adminTalk, triggerRecord: true})
resloveAdminPrompt({ prompt: adminTalk, triggerRecord: true })
}
}

ipcMain.handle('send-prompt', async (event, text)=>{
resloveAdminPrompt({prompt: text})
ipcMain.handle('send-prompt', async (event, text) => {
resloveAdminPrompt({ prompt: text })
})
ipcMain.handle('get-admin-name', async (event)=>{
ipcMain.handle('get-admin-name', async (event) => {
return ADMIN_NAME
})
ipcMain.handle('open-config', async (event)=>{
ipcMain.handle('open-config', async (event) => {
shell.openExternal(path.join(STORE_PATH, 'config.json'))
})
ipcMain.handle('switch-speech-talk', async ()=>{
ipcMain.handle('switch-speech-talk', async () => {
STATUS.isSpeechTalk = !STATUS.isSpeechTalk
mainWindow.setProgressBar(-1)
if (STATUS.isSpeechTalk) {
16 changes: 8 additions & 8 deletions modules/common.js
@@ -4,12 +4,12 @@ const { HttpsProxyAgent } = require('https-proxy-agent')
const axios = require('axios')
const _ = require('lodash')

const {config:{
const { config: {
OPENAI_API_KEY,
AZURE_OPENAI_KEY, AZURE_OPENAI_ENDPOINT, AZURE_API_VERSION,
DEFAULT_MODEL,
proxyObject, proxyString
}} = require('../utils/loadConfig.js')
} } = require('../utils/loadConfig.js')

const openai = new OpenAI({
apiKey: OPENAI_API_KEY,
@@ -54,17 +54,17 @@ const openaiChatStream = async function* ({ model = DEFAULT_MODEL, messages, fun
if (['stop', 'function_call'].includes(_.get(part, 'choices[0].delta.finish_reason'))) return
const token = _.get(part, 'choices[0].delta.content')
const f_token = _.get(part, 'choices[0].delta.function_call', {})
if (token || !_.isEmpty(f_token)) yield {token, f_token}
if (token || !_.isEmpty(f_token)) yield { token, f_token }
}
}

const openaiEmbedding = ({ input, model = 'text-embedding-ada-002' })=>{
const openaiEmbedding = ({ input, model = 'text-embedding-ada-002' }) => {
return openai.embeddings.create({
model, input
})
.then(res => {
return _.get(res, 'data[0].embedding')
})
.then(res => {
return _.get(res, 'data[0].embedding')
})
}

const azureOpenaiChat = ({ model = DEFAULT_MODEL, messages, timeoutMs = 40000 }) => {
Expand Down Expand Up @@ -128,7 +128,7 @@ const azureOpenaiChatStream = async function* ({ model = DEFAULT_MODEL, messages
}
}

const azureOpenaiEmbedding = ({ input, model = 'text-embedding-ada-002', timeoutMs = 20000 })=>{
const azureOpenaiEmbedding = ({ input, model = 'text-embedding-ada-002', timeoutMs = 20000 }) => {
return axios.post(
`${AZURE_OPENAI_ENDPOINT}/openai/deployments/${model}/embeddings?api-version=${AZURE_API_VERSION}`,
{ input },
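
For orientation, the sketch below shows how the { token, f_token } chunks yielded by openaiChatStream are typically consumed; it mirrors the for-await loops in index.js earlier in this diff. It is an illustrative sketch, not part of the commit: the messages array and the demoStream wrapper are placeholders.

// Illustrative sketch (not in this commit): consuming openaiChatStream, which
// yields { token, f_token } deltas (plain content tokens vs. function_call parts).
const { openaiChatStream } = require('./modules/common.js')

const demoStream = async () => {
  let resText = ''
  let resFunction = ''
  let resArgument = ''
  for await (const { token, f_token } of openaiChatStream({
    messages: [{ role: 'user', content: 'Hello' }]  // placeholder prompt; model defaults to DEFAULT_MODEL
  })) {
    if (token) resText += token
    const { name, arguments: arg } = f_token
    if (name) resFunction = name
    if (arg) resArgument += arg
  }
  console.log({ resText, resFunction, resArgument })
}
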
8 changes: 4 additions & 4 deletions modules/edge-tts.js
@@ -1,20 +1,20 @@
const { spawn } = require('node:child_process')
const { config } = require('../utils/loadConfig.js')

let ttsPromise = (text, audioPath)=>{
const ttsPromise = (text, audioPath) => {
let vttPath = audioPath + '.vtt'
return new Promise((resolve, reject)=>{
return new Promise((resolve, reject) => {
const spawned = spawn('edge-tts', [
'-v', config.SpeechSynthesisVoiceName,
'--text', text,
'--write-media', audioPath,
'--write-subtitles', vttPath,
'--proxy', config.proxyString
])
spawned.on('error', data=>{
spawned.on('error', data => {
reject(data)
})
spawned.on('exit', code=>{
spawned.on('exit', code => {
if (code === 0) {
return resolve(vttPath)
}
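
As a usage note, ttsPromise above spawns the edge-tts CLI and resolves with the path of the .vtt subtitle file written next to the audio file. The sketch below shows a minimal call; it is illustrative only, not part of the commit, and the output path, demo text, and demoTts wrapper are placeholders.

// Illustrative sketch (not in this commit): awaiting ttsPromise for one utterance.
const path = require('node:path')
const { ttsPromise } = require('./modules/edge-tts.js')

const demoTts = async () => {
  const audioPath = path.join(__dirname, 'demo.mp3')  // placeholder output location
  const vttPath = await ttsPromise('Hello from edge-tts', audioPath)
  console.log('audio written to', audioPath, 'subtitles written to', vttPath)
}
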
24 changes: 12 additions & 12 deletions modules/functions.js
@@ -1,5 +1,5 @@
const google = require('@schneehertz/google-it')
const { config: {proxyString, AI_NAME} } = require('../utils/loadConfig.js')
const { config: { proxyString, AI_NAME } } = require('../utils/loadConfig.js')

const functionInfo = [
{
@@ -15,11 +15,11 @@ const functionInfo = [
},
"required": ["queryString"],
}
},
{
"name": "getHistoricalConversationContent",
"description": "Searching historical conversation content in conversation history.",
"parameters": {
},
{
"name": "getHistoricalConversationContent",
"description": "Searching historical conversation content in conversation history.",
"parameters": {
"type": "object",
"properties": {
"relatedText": {
@@ -33,15 +33,15 @@ const functionInfo = [
]

const functionAction = {
getInformationFromGoogle ({queryString}) {
getInformationFromGoogle({ queryString }) {
return `${AI_NAME}正在搜索${queryString}`
},
getHistoricalConversationContent ({relatedText}) {
getHistoricalConversationContent({ relatedText }) {
return `${AI_NAME}想起了关于${relatedText}的事情`
}
}

const getInformationFromGoogle = async ({queryString}) => {
const getInformationFromGoogle = async ({ queryString }) => {
let options = { proxy: proxyString }
let additionalQueryParam = {
lr: 'lang_zh-CN',
@@ -50,14 +50,14 @@ const getInformationFromGoogle = async ({queryString}) => {
gl: 'cn',
safe: 'high'
}
let googleRes = await google({options, disableConsole: true, query: queryString, limit: 6, additionalQueryParam})
let googleRes = await google({ options, disableConsole: true, query: queryString, limit: 6, additionalQueryParam })
// return googleRes.map(r=>r.snippet).join('\n').slice(0, 800)
return JSON.stringify(googleRes)
}

const getHistoricalConversationContent = async ({relatedText, dbTable}) => {
const getHistoricalConversationContent = async ({ relatedText, dbTable }) => {
let MemoryTexts = await dbTable.search(relatedText).limit(2).execute()
return MemoryTexts.map(s=>s.text).join('\n')
return MemoryTexts.map(s => s.text).join('\n')
}


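
Read together with the call sites in index.js earlier in this diff, a model-requested function call is dispatched through functionList roughly as sketched below. This restates the switch from index.js using only names visible in the diff; the runFunctionCall wrapper itself is illustrative and not part of the commit.

// Illustrative sketch (not in this commit): dispatching a function call requested
// by the model. resFunction/resArgument are accumulated from the streamed
// function_call deltas; memoryTable is the lancedb table opened in index.js.
const _ = require('lodash')
const { functionList } = require('./modules/functions.js')

const runFunctionCall = async (resFunction, resArgument, memoryTable) => {
  try {
    if (resFunction === 'getHistoricalConversationContent') {
      // this helper also needs the vector table, so it is merged into the parsed arguments
      return await functionList[resFunction](_.assign({ dbTable: memoryTable }, JSON.parse(resArgument)))
    }
    return await functionList[resFunction](JSON.parse(resArgument))
  } catch (e) {
    console.log(e)
    return ''  // index.js falls back to an empty result when the call fails
  }
}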
