Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,9 @@ GROQ_API_KEY=APIKEYGOESHERE
# https://brave.com/search/api/
BRAVE_SEARCH_API_KEY=APIKEYGOESHERE
# https://serper.dev/
SERPER_API=APIKEYGOESHERE
SERPER_API=APIKEYGOESHERE

# Set LAN GPU server, examples:
# PC | http://localhost:11434/v1
# LAN GPU server | http://192.168.1.100:11434/v1
OLLAMA_BASE_URL=http://localhost:11434/v1
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,5 @@ node_modules
.cache
.turbo
.vercel
.vscode
.vscode
.idea
2 changes: 1 addition & 1 deletion app/action.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import { config } from './config';
let openai: OpenAI;
if (config.useOllamaInference) {
openai = new OpenAI({
baseURL: 'http://localhost:11434/v1',
baseURL: config.ollamaBaseUrl,
apiKey: 'ollama'
});
} else {
Expand Down
7 changes: 5 additions & 2 deletions app/config.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@
// - IMPORTANT: Follow-up questions are not yet implemented with Ollama models, only OpenAI compatible models that use {type: "json_object"}

export const config = {
useOllamaInference: false,
useOllamaEmbeddings: false,
useOllamaInference: false,
useOllamaEmbeddings: false,
inferenceModel: 'mixtral-8x7b-32768', // Groq: 'mixtral-8x7b-32768', 'gemma-7b-it' // OpenAI: 'gpt-3.5-turbo', 'gpt-4' // Ollama 'mistral', 'llama2' etc
inferenceAPIKey: process.env.GROQ_API_KEY, // Groq: process.env.GROQ_API_KEY // OpenAI: process.env.OPENAI_API_KEY // Ollama: 'ollama' is the default
embeddingsModel: 'text-embedding-3-small', // Ollama: 'llama2', 'nomic-embed-text' // OpenAI 'text-embedding-3-small', 'text-embedding-3-large'
Expand All @@ -15,4 +15,7 @@ export const config = {
numberOfSimilarityResults: 4, // Number of similarity results to return per page
numberOfPagesToScan: 10, // Recommended to decrease for Ollama
nonOllamaBaseURL: 'https://api.groq.com/openai/v1', //Groq: https://api.groq.com/openai/v1 // OpenAI: https://api.openai.com/v1

// Set LAN GPU server, example: http://192.168.1.100:11434/v1
ollamaBaseUrl: process.env.OLLAMA_BASE_URL || 'http://localhost:11434/v1',
};
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
},
"dependencies": {
"@langchain/community": "^0.0.40",
"@langchain/openai": "^0.0.25",
"@radix-ui/react-icons": "^1.3.0",
"@radix-ui/react-label": "^2.0.2",
"@radix-ui/react-separator": "^1.0.3",
Expand Down