Binary file added providers/burncloud/logo.svg
20 changes: 20 additions & 0 deletions providers/burncloud/models/claude-3-5-haiku-20241022.toml
@@ -0,0 +1,20 @@
[model]
name = "Claude 3.5 Haiku (20241022)"
provider = "burncloud"
model_id = "claude-3-5-haiku-20241022"
type = "chat"
description = "Claude 3.5 Haiku model, fast and efficient for most tasks"

[model.context]
max_tokens = 200000
supports_vision = false
supports_tools = true

[model.pricing]
input_price_per_token = 0.0000008 # $0.80 per million input tokens
output_price_per_token = 0.000004 # $4.00 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
20 changes: 20 additions & 0 deletions providers/burncloud/models/claude-3-5-sonnet-20240620.toml
@@ -0,0 +1,20 @@
[model]
name = "Claude 3.5 Sonnet (20240620)"
provider = "burncloud"
model_id = "claude-3-5-sonnet-20240620"
type = "chat"
description = "Claude 3.5 Sonnet model, balanced performance and capability"

[model.context]
max_tokens = 200000
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.000003 # $3.00 per million input tokens
output_price_per_token = 0.000015 # $15.00 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
20 changes: 20 additions & 0 deletions providers/burncloud/models/claude-3-5-sonnet-20241022.toml
@@ -0,0 +1,20 @@
[model]
name = "Claude 3.5 Sonnet (20241022)"
provider = "burncloud"
model_id = "claude-3-5-sonnet-20241022"
type = "chat"
description = "Claude 3.5 Sonnet model, improved version with better capabilities"

[model.context]
max_tokens = 200000
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.000003 # $3.00 per million input tokens
output_price_per_token = 0.000015 # $15.00 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
20 changes: 20 additions & 0 deletions providers/burncloud/models/claude-3-7-sonnet-20250219.toml
@@ -0,0 +1,20 @@
[model]
name = "Claude 3.7 Sonnet (20250219)"
provider = "burncloud"
model_id = "claude-3-7-sonnet-20250219"
type = "chat"
description = "Claude 3.7 Sonnet model, latest generation with enhanced capabilities"

[model.context]
max_tokens = 200000
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.000003 # $3.00 per million input tokens
output_price_per_token = 0.000015 # $15.00 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
20 changes: 20 additions & 0 deletions providers/burncloud/models/claude-sonnet-4-5-20250929.toml
@@ -0,0 +1,20 @@
[model]
name = "Claude Sonnet 4.5 (20250929)"
provider = "burncloud"
model_id = "claude-sonnet-4-5-20250929"
type = "chat"
description = "Claude Sonnet 4.5 model, next-generation performance"

[model.context]
max_tokens = 200000
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.0000025 # $2.50 per million input tokens
output_price_per_token = 0.0000125 # $12.50 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
20 changes: 20 additions & 0 deletions providers/burncloud/models/deepseek-chat.toml
@@ -0,0 +1,20 @@
[model]
name = "DeepSeek Chat"
provider = "burncloud"
model_id = "deepseek-chat"
type = "chat"
description = "DeepSeek Chat model, optimized for conversational tasks"

[model.context]
max_tokens = 128000
supports_vision = false
supports_tools = true

[model.pricing]
input_price_per_token = 0.00000014 # $0.14 per million input tokens
output_price_per_token = 0.00000028 # $0.28 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
20 changes: 20 additions & 0 deletions providers/burncloud/models/deepseek-r1.toml
@@ -0,0 +1,20 @@
[model]
name = "DeepSeek R1"
provider = "burncloud"
model_id = "deepseek-r1"
type = "chat"
description = "DeepSeek R1 model, advanced reasoning capabilities"

[model.context]
max_tokens = 128000
supports_vision = false
supports_tools = true

[model.pricing]
input_price_per_token = 0.00000055 # $0.55 per million input tokens
output_price_per_token = 0.00000219 # $2.19 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
20 changes: 20 additions & 0 deletions providers/burncloud/models/deepseek-v3.toml
@@ -0,0 +1,20 @@
[model]
name = "DeepSeek V3"
provider = "burncloud"
model_id = "deepseek-v3"
type = "chat"
description = "DeepSeek V3 model, latest generation with improved performance"

[model.context]
max_tokens = 128000
supports_vision = false
supports_tools = true

[model.pricing]
input_price_per_token = 0.00000014 # $0.14 per million input tokens
output_price_per_token = 0.00000028 # $0.28 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
21 changes: 21 additions & 0 deletions providers/burncloud/models/gemini-2.0-flash.toml
@@ -0,0 +1,21 @@
[model]
name = "Gemini 2.0 Flash"
provider = "burncloud"
model_id = "gemini-2.0-flash"
type = "chat"
description = "Gemini 2.0 Flash model, fast multimodal capabilities"

[model.context]
max_tokens = 1048576 # 1M tokens
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.000000075 # $0.075 per million input tokens
output_price_per_token = 0.0000003 # $0.30 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
image_generation = true
21 changes: 21 additions & 0 deletions providers/burncloud/models/gemini-2.5-flash.toml
@@ -0,0 +1,21 @@
[model]
name = "Gemini 2.5 Flash"
provider = "burncloud"
model_id = "gemini-2.5-flash"
type = "chat"
description = "Gemini 2.5 Flash model, enhanced speed and capabilities"

[model.context]
max_tokens = 1048576 # 1M tokens
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.000000075 # $0.075 per million input tokens
output_price_per_token = 0.0000003 # $0.30 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
image_generation = true
21 changes: 21 additions & 0 deletions providers/burncloud/models/gemini-2.5-pro.toml
@@ -0,0 +1,21 @@
[model]
name = "Gemini 2.5 Pro"
provider = "burncloud"
model_id = "gemini-2.5-pro"
type = "chat"
description = "Gemini 2.5 Pro model, premium performance for complex tasks"

[model.context]
max_tokens = 2097152 # 2M tokens
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.00000125 # $1.25 per million input tokens
output_price_per_token = 0.000005 # $5.00 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
image_generation = true
21 changes: 21 additions & 0 deletions providers/burncloud/models/gpt-4o-mini.toml
@@ -0,0 +1,21 @@
[model]
name = "GPT-4o Mini"
provider = "burncloud"
model_id = "gpt-4o-mini"
type = "chat"
description = "GPT-4o Mini model, affordable and efficient for most tasks"

[model.context]
max_tokens = 128000
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.00000015 # $0.15 per million input tokens
output_price_per_token = 0.0000006 # $0.60 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
image_vision = true
21 changes: 21 additions & 0 deletions providers/burncloud/models/gpt-4o.toml
@@ -0,0 +1,21 @@
[model]
name = "GPT-4o"
provider = "burncloud"
model_id = "gpt-4o"
type = "chat"
description = "GPT-4o model, flagship multimodal model with vision capabilities"

[model.context]
max_tokens = 128000
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.0000025 # $2.50 per million input tokens
output_price_per_token = 0.00001 # $10.00 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
image_vision = true
21 changes: 21 additions & 0 deletions providers/burncloud/models/gpt-5-mini.toml
@@ -0,0 +1,21 @@
[model]
name = "GPT-5 Mini"
provider = "burncloud"
model_id = "gpt-5-mini"
type = "chat"
description = "GPT-5 Mini model, efficient next-generation model"

[model.context]
max_tokens = 128000
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.00000015 # $0.15 per million input tokens
output_price_per_token = 0.0000006 # $0.60 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
image_vision = true
21 changes: 21 additions & 0 deletions providers/burncloud/models/gpt-5.toml
@@ -0,0 +1,21 @@
[model]
name = "GPT-5"
provider = "burncloud"
model_id = "gpt-5"
type = "chat"
description = "GPT-5 model, next-generation flagship model with enhanced capabilities"

[model.context]
max_tokens = 128000
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.000003 # $3.00 per million input tokens
output_price_per_token = 0.000015 # $15.00 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
image_vision = true
21 changes: 21 additions & 0 deletions providers/burncloud/models/gpt-image-1.toml
@@ -0,0 +1,21 @@
[model]
name = "GPT Image 1"
provider = "burncloud"
model_id = "gpt-image-1"
type = "image_generation"
description = "GPT Image 1 model for high-quality image generation"

[model.context]
max_tokens = 0
supports_vision = false
supports_tools = false

[model.pricing]
input_price_per_token = 0.00004 # $40.00 per million tokens

[model.capabilities]
streaming = false
json_mode = false
function_calling = false
image_generation = true
supported_sizes = ["1024x1024", "1792x1024", "1024x1792"]
21 changes: 21 additions & 0 deletions providers/burncloud/models/grok-2.toml
@@ -0,0 +1,21 @@
[model]
name = "Grok 2"
provider = "burncloud"
model_id = "grok-2"
type = "chat"
description = "Grok 2 model by xAI, witty and knowledgeable conversational AI"

[model.context]
max_tokens = 131072
supports_vision = true
supports_tools = true

[model.pricing]
input_price_per_token = 0.000002 # $2.00 per million input tokens
output_price_per_token = 0.00001 # $10.00 per million output tokens

[model.capabilities]
streaming = true
json_mode = true
function_calling = true
image_vision = true
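
The seventeen TOML files above share one schema ([model], [model.context], [model.pricing], [model.capabilities]), so a consumer can treat them uniformly. Below is a minimal, hypothetical sketch of how such a file could be loaded and its per-token prices turned into a request cost estimate; it assumes only the field names visible in this diff plus Python 3.11's standard tomllib, and it is not code from this repository.

# Hypothetical example (not part of this PR): load one burncloud model file
# and estimate a request's cost from its [model.pricing] table.
# Assumes Python 3.11+ (tomllib) and the field names used in this diff.
import tomllib

def load_model(path: str) -> dict:
    """Parse a providers/burncloud/models/*.toml file and return its [model] table."""
    with open(path, "rb") as fp:
        return tomllib.load(fp)["model"]

def estimate_cost(model: dict, input_tokens: int, output_tokens: int) -> float:
    """Estimate USD cost for a single request from per-token prices."""
    pricing = model["pricing"]
    cost = input_tokens * pricing["input_price_per_token"]
    # gpt-image-1 defines no output price, so fall back to zero.
    cost += output_tokens * pricing.get("output_price_per_token", 0.0)
    return cost

model = load_model("providers/burncloud/models/gpt-4o.toml")
# 10,000 input + 1,000 output tokens at $2.50/M and $10.00/M:
# 10_000 * 0.0000025 + 1_000 * 0.00001 = 0.025 + 0.01 = $0.035
print(f"{model['name']}: ${estimate_cost(model, 10_000, 1_000):.4f}")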