A unified Go client library for interacting with multiple Large Language Model (LLM) providers through a consistent interface. EasyLLM abstracts away the differences between provider APIs so you can switch models without rewriting application code.
- OpenAI - GPT models, embeddings, and DALL-E image generation
- Claude - Anthropic's Claude models with advanced reasoning
- Gemini - Google's Gemini AI models
- DeepSeek - DeepSeek's reasoning and coding models
- Azure OpenAI - Enterprise-grade OpenAI models via Azure
- OpenRouter - Access to multiple models through OpenRouter's API
- Consistent API across all providers
- Seamless provider switching without code changes
- Standardized request/response formats
- Streaming Support - Real-time response streaming for better user experience
- Function Calling - Tool use and function calling capabilities
- Embeddings - Text embedding generation (where supported)
- Image Generation - AI image creation (where supported)
- Cost Calculation - Built-in pricing and usage cost tracking
- JSON Schema - Automatic schema generation for structured outputs
- Input validation and error handling
- Comprehensive test coverage
- Template-based prompt management
- Cache optimization for performance
- Concurrent request support (see the fan-out sketch after the multi-provider example)
go get github.com/easyagent-dev/llm
package main
import (
"context"
"fmt"
"log"
"github.com/easyagent-dev/llm"
"github.com/easyagent-dev/llm/types"
"github.com/easyagent-dev/llm/types/completion"
)
func main() {
// Initialize OpenAI client
model, err := llm.NewOpenAIModel(
types.WithAPIKey("your-openai-api-key"),
)
if err != nil {
log.Fatal(err)
}
// Create a request
req := &completion.CompletionRequest{
Model: "gpt-4o-mini",
Instructions: "You are a helpful assistant.",
Messages: []*types.ModelMessage{
{
Role: types.MessageRoleUser,
Content: "What is the capital of France?",
},
},
Options: []completion.CompletionOption{
completion.WithCost(true),
},
}
// Generate response
resp, err := model.Complete(context.Background(), req, nil)
if err != nil {
log.Fatal(err)
}
fmt.Println("Response:", resp.Output)
if resp.Cost != nil {
fmt.Printf("Cost: $%.6f\n", *resp.Cost)
}
}
func streamExample() {
model, _ := llm.NewOpenAIModel(
types.WithAPIKey("your-api-key"),
)
req := &completion.CompletionRequest{
Model: "gpt-4o-mini",
Instructions: "You are a helpful assistant.",
Messages: []*types.ModelMessage{
{
Role: types.MessageRoleUser,
Content: "Write a short story about AI",
},
},
}
stream, err := model.StreamComplete(context.Background(), req, nil)
if err != nil {
log.Fatal(err)
}
for chunk := range stream {
switch c := chunk.(type) {
case types.StreamTextChunk:
fmt.Print(c.Text)
case types.StreamUsageChunk:
fmt.Printf("\nTokens used: %d\n", c.Usage.TotalInputTokens+c.Usage.TotalOutputTokens)
}
}
}
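If you also need the complete text after streaming finishes, accumulate the chunks as they arrive. A minimal sketch, a drop-in variant of the loop above (requires the standard library "strings" package):
var full strings.Builder
for chunk := range stream {
	switch c := chunk.(type) {
	case types.StreamTextChunk:
		fmt.Print(c.Text)        // live output
		full.WriteString(c.Text) // accumulate for later use
	}
}
fmt.Printf("\nStreamed %d characters\n", full.Len())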
func multiProviderExample() {
// Initialize multiple providers
openai, _ := llm.NewOpenAIModel(
types.WithAPIKey("openai-key"),
)
deepseek, _ := llm.NewDeepSeekModel(
types.WithAPIKey("deepseek-key"),
)
req := &completion.CompletionRequest{
Model: "gpt-4o-mini", // or "deepseek-chat", etc.
Instructions: "You are a helpful assistant.",
Messages: []*types.ModelMessage{
{
Role: types.MessageRoleUser,
Content: "Explain quantum computing",
},
},
}
// Use OpenAI
resp1, _ := openai.Complete(context.Background(), req, nil)
fmt.Printf("OpenAI Response: %s\n\n", resp1.Output)
// Use DeepSeek with same request structure
req.Model = "deepseek-chat"
resp2, _ := deepseek.Complete(context.Background(), req, nil)
fmt.Printf("DeepSeek Response: %s\n\n", resp2.Output)
}
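The feature list also advertises concurrent request support. Because each call takes its own request and context, fanning out with goroutines is plain Go. A minimal sketch, assuming Complete is safe for concurrent use and that Output is a string (the examples above print it with %s); requires the standard library "sync" package:
func concurrentExample() {
	model, _ := llm.NewOpenAIModel(
		types.WithAPIKey("your-api-key"),
	)
	prompts := []string{
		"Define entropy in one sentence.",
		"Define enthalpy in one sentence.",
	}
	var wg sync.WaitGroup
	results := make([]string, len(prompts)) // one slot per goroutine, no shared writes
	for i, p := range prompts {
		wg.Add(1)
		go func(i int, p string) {
			defer wg.Done()
			req := &completion.CompletionRequest{
				Model:        "gpt-4o-mini",
				Instructions: "You are a helpful assistant.",
				Messages: []*types.ModelMessage{
					{Role: types.MessageRoleUser, Content: p},
				},
			}
			resp, err := model.Complete(context.Background(), req, nil)
			if err != nil {
				results[i] = "error: " + err.Error()
				return
			}
			results[i] = resp.Output
		}(i, p)
	}
	wg.Wait()
	for i, r := range results {
		fmt.Printf("%d: %s\n", i, r)
	}
}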
func reasoningExample() {
// Use reasoning model with completion API
model, _ := llm.NewOpenAIModel(
types.WithAPIKey("your-api-key"),
)
req := &completion.CompletionRequest{
Model: "o4-mini",
Instructions: "You are a helpful assistant.",
Messages: []*types.ModelMessage{
{
Role: types.MessageRoleUser,
Content: "Solve this logic puzzle: If all A are B, and all B are C, what can we conclude?",
},
},
Options: []completion.CompletionOption{
completion.WithReasoningEffort(completion.ReasoningEffortLow),
},
}
resp, _ := model.Complete(context.Background(), req, nil)
fmt.Printf("Response: %s\n", resp.Output)
// Access reasoning tokens if available
if resp.Usage != nil {
fmt.Printf("Reasoning tokens: %d\n", resp.Usage.TotalReasoningTokens)
}
}
func conversationExample() {
// Use the conversation API for advanced reasoning.
// Note: this example uses the conversation package; its import path is assumed
// to mirror types/completion (e.g. "github.com/easyagent-dev/llm/types/conversation").
model, _ := llm.NewOpenAIConversationModel(
types.WithAPIKey("your-api-key"),
)
req := &conversation.ConversationRequest{
Model: "o4-mini",
Input: "Explain the theory of relativity in simple terms.",
Options: []conversation.ResponseOption{
conversation.WithReasoningEffort(conversation.ReasoningEffortMedium),
conversation.WithReasoningSummary("detailed"),
},
}
stream, err := model.StreamResponse(context.Background(), req, nil)
if err != nil {
log.Fatal(err)
}
for chunk := range stream {
switch c := chunk.(type) {
case types.StreamTextChunk:
fmt.Print(c.Text)
}
}
fmt.Println()
}
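Every call above accepts a context.Context, so per-request timeouts and cancellation work the usual Go way. A minimal sketch, assuming the provider implementations respect context cancellation (idiomatic, but not verified here):
func timeoutExample() {
	model, _ := llm.NewOpenAIModel(
		types.WithAPIKey("your-api-key"),
	)
	// Abandon the request if it takes longer than 30 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	req := &completion.CompletionRequest{
		Model:        "gpt-4o-mini",
		Instructions: "You are a helpful assistant.",
		Messages: []*types.ModelMessage{
			{Role: types.MessageRoleUser, Content: "Summarize the plot of Hamlet."},
		},
	}
	stream, err := model.StreamComplete(ctx, req, nil)
	if err != nil {
		log.Fatal(err)
	}
	for chunk := range stream {
		if c, ok := chunk.(types.StreamTextChunk); ok {
			fmt.Print(c.Text)
		}
	}
	fmt.Println()
}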
- Chat Models: GPT-4o, GPT-4, GPT-3.5-turbo, o1-preview, o1-mini
- Embedding Models: text-embedding-3-large, text-embedding-3-small, text-embedding-ada-002
- Image Models: DALL-E 3, DALL-E 2
- Chat Models: Claude 3.5 Sonnet, Claude 3 Opus, Claude 3 Sonnet, Claude 3 Haiku
- Chat Models: Gemini Pro, Gemini Pro Vision, Gemini 1.5 Pro, Gemini 1.5 Flash
- Chat Models: DeepSeek V3, DeepSeek Coder, DeepSeek Chat
- All OpenAI models available through Azure's enterprise platform
- Access to 200+ models from various providers through a single API
export OPENAI_API_KEY="your-openai-key"
export CLAUDE_API_KEY="your-claude-key"
export GEMINI_API_KEY="your-gemini-key"
export DEEPSEEK_API_KEY="your-deepseek-key"
// OpenAI with custom base URL
openai, _ := llm.NewOpenAIModel(
types.WithAPIKey("key"),
types.WithBaseURL("https://api.openai.com/v1"), // Optional
)
// DeepSeek
deepseek, _ := llm.NewDeepSeekModel(
types.WithAPIKey("key"),
)
// Using environment variables
model, _ := llm.NewOpenAIModel(
types.WithAPIKey(os.Getenv("OPENAI_API_KEY")),
)
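Building on the snippet above, it is worth failing fast when a variable is unset rather than passing an empty key to the constructor. A small sketch (the error message is illustrative):
key := os.Getenv("OPENAI_API_KEY")
if key == "" {
	log.Fatal("OPENAI_API_KEY is not set")
}
model, err := llm.NewOpenAIModel(types.WithAPIKey(key))
if err != nil {
	log.Fatal(err)
}
// use model as in the examples above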
resp, err := model.Complete(ctx, req, nil)
if err != nil {
// Handle different error types (retry() and handleQuotaError() are placeholders)
switch {
case strings.Contains(err.Error(), "rate limit"):
// Handle rate limiting
time.Sleep(time.Minute)
return retry()
case strings.Contains(err.Error(), "insufficient_quota"):
// Handle quota exceeded
return handleQuotaError()
default:
// Handle other errors
log.Printf("API Error: %v", err)
}
}
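Built-in retry with exponential backoff is still on the roadmap (see below), so a user-side loop is a reasonable stopgap. A minimal sketch, meant to live inside a function and reusing the model, ctx, and req from the examples above; the rate-limit string match mirrors the error handling shown here:
backoff := time.Second
for attempt := 1; attempt <= 4; attempt++ {
	resp, err := model.Complete(ctx, req, nil)
	if err == nil {
		fmt.Println(resp.Output)
		return
	}
	if !strings.Contains(err.Error(), "rate limit") {
		log.Fatal(err) // not retryable in this sketch
	}
	log.Printf("attempt %d hit a rate limit; retrying in %v", attempt, backoff)
	time.Sleep(backoff)
	backoff *= 2 // exponential backoff: 1s, 2s, 4s, 8s
}
log.Fatal("giving up after repeated rate limits")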
// Enable cost tracking with options
req := &completion.CompletionRequest{
Model: "gpt-4o-mini",
Instructions: "You are a helpful assistant.",
Messages: []*types.ModelMessage{
{
Role: types.MessageRoleUser,
Content: "Hello!",
},
},
Options: []completion.CompletionOption{
completion.WithCost(true),
completion.WithUsage(true),
},
}
resp, _ := model.Complete(ctx, req, nil)
// Access cost information
if resp.Cost != nil {
fmt.Printf("Request cost: $%.6f\n", *resp.Cost)
}
// Access detailed usage
if resp.Usage != nil {
fmt.Printf("Input tokens: %d\n", resp.Usage.TotalInputTokens)
fmt.Printf("Output tokens: %d\n", resp.Usage.TotalOutputTokens)
fmt.Printf("Reasoning tokens: %d\n", resp.Usage.TotalReasoningTokens)
}
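Because cost is reported per request, tracking spend across a batch is a matter of summing the values. A minimal sketch reusing the model and ctx from above:
var total float64
for _, prompt := range []string{"Hello!", "How are you?"} {
	req := &completion.CompletionRequest{
		Model:        "gpt-4o-mini",
		Instructions: "You are a helpful assistant.",
		Messages: []*types.ModelMessage{
			{Role: types.MessageRoleUser, Content: prompt},
		},
		Options: []completion.CompletionOption{completion.WithCost(true)},
	}
	resp, err := model.Complete(ctx, req, nil)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Cost != nil {
		total += *resp.Cost
	}
}
fmt.Printf("Total cost: $%.6f\n", total)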
Run the test suite:
go test ./...
Run tests with coverage:
go test -cover ./...
Run benchmarks:
go test -bench=. ./...
We welcome contributions! Please see our Contributing Guidelines for details.
- Clone the repository:
git clone https://github.com/easyagent-dev/llm.git
cd llm
- Install dependencies:
go mod download
- Run tests:
go test ./...
- Create your feature branch:
git checkout -b feature/amazing-feature
- Commit your changes:
git commit -m 'Add amazing feature'
- Push to the branch:
git push origin feature/amazing-feature
- Open a Pull Request
This project is licensed under the Apache License 2.0 - see the LICENSE file for details.
- 📖 Documentation
- 🐛 Issue Tracker
- 💬 Discussions
- Additional provider support (Cohere, Hugging Face, etc.)
- Advanced retry mechanisms with exponential backoff
- Built-in prompt templates and management
- Metrics and monitoring integration
- Async/batch processing capabilities
- Model performance benchmarking tools
Made with ❤️ by the EasyMVP team