diff --git a/providers/burncloud/logo.svg b/providers/burncloud/logo.svg new file mode 100644 index 00000000..adcd012c Binary files /dev/null and b/providers/burncloud/logo.svg differ diff --git a/providers/burncloud/models/claude-3-5-haiku-20241022.toml b/providers/burncloud/models/claude-3-5-haiku-20241022.toml new file mode 100644 index 00000000..21b815ad --- /dev/null +++ b/providers/burncloud/models/claude-3-5-haiku-20241022.toml @@ -0,0 +1,20 @@ +[model] +name = "Claude 3.5 Haiku (20241022)" +provider = "burncloud" +model_id = "claude-3-5-haiku-20241022" +type = "chat" +description = "Claude 3.5 Haiku model, fast and efficient for most tasks" + +[model.context] +max_tokens = 200000 +supports_vision = false +supports_tools = true + +[model.pricing] +input_price_per_token = 0.0000008 # $0.80 per million input tokens +output_price_per_token = 0.000004 # $4.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true \ No newline at end of file diff --git a/providers/burncloud/models/claude-3-5-sonnet-20240620.toml b/providers/burncloud/models/claude-3-5-sonnet-20240620.toml new file mode 100644 index 00000000..a960b5dc --- /dev/null +++ b/providers/burncloud/models/claude-3-5-sonnet-20240620.toml @@ -0,0 +1,20 @@ +[model] +name = "Claude 3.5 Sonnet (20240620)" +provider = "burncloud" +model_id = "claude-3-5-sonnet-20240620" +type = "chat" +description = "Claude 3.5 Sonnet model, balanced performance and capability" + +[model.context] +max_tokens = 200000 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000003 # $3.00 per million input tokens +output_price_per_token = 0.000015 # $15.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true \ No newline at end of file diff --git a/providers/burncloud/models/claude-3-5-sonnet-20241022.toml b/providers/burncloud/models/claude-3-5-sonnet-20241022.toml new file mode 
100644 index 00000000..3cbbe08d --- /dev/null +++ b/providers/burncloud/models/claude-3-5-sonnet-20241022.toml @@ -0,0 +1,20 @@ +[model] +name = "Claude 3.5 Sonnet (20241022)" +provider = "burncloud" +model_id = "claude-3-5-sonnet-20241022" +type = "chat" +description = "Claude 3.5 Sonnet model, improved version with better capabilities" + +[model.context] +max_tokens = 200000 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000003 # $3.00 per million input tokens +output_price_per_token = 0.000015 # $15.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true \ No newline at end of file diff --git a/providers/burncloud/models/claude-3-7-sonnet-20250219.toml b/providers/burncloud/models/claude-3-7-sonnet-20250219.toml new file mode 100644 index 00000000..daa91bda --- /dev/null +++ b/providers/burncloud/models/claude-3-7-sonnet-20250219.toml @@ -0,0 +1,20 @@ +[model] +name = "Claude 3.7 Sonnet (20250219)" +provider = "burncloud" +model_id = "claude-3-7-sonnet-20250219" +type = "chat" +description = "Claude 3.7 Sonnet model, latest generation with enhanced capabilities" + +[model.context] +max_tokens = 200000 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000003 # $3.00 per million input tokens +output_price_per_token = 0.000015 # $15.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true \ No newline at end of file diff --git a/providers/burncloud/models/claude-sonnet-4-5-20250929.toml b/providers/burncloud/models/claude-sonnet-4-5-20250929.toml new file mode 100644 index 00000000..98c929e6 --- /dev/null +++ b/providers/burncloud/models/claude-sonnet-4-5-20250929.toml @@ -0,0 +1,20 @@ +[model] +name = "Claude Sonnet 4.5 (20250929)" +provider = "burncloud" +model_id = "claude-sonnet-4-5-20250929" +type = "chat" +description = "Claude Sonnet 4.5 model, 
next-generation performance" + +[model.context] +max_tokens = 200000 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.0000025 # $2.50 per million input tokens +output_price_per_token = 0.0000125 # $12.50 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true \ No newline at end of file diff --git a/providers/burncloud/models/deepseek-chat.toml b/providers/burncloud/models/deepseek-chat.toml new file mode 100644 index 00000000..949a0fd4 --- /dev/null +++ b/providers/burncloud/models/deepseek-chat.toml @@ -0,0 +1,20 @@ +[model] +name = "DeepSeek Chat" +provider = "burncloud" +model_id = "deepseek-chat" +type = "chat" +description = "DeepSeek Chat model, optimized for conversational tasks" + +[model.context] +max_tokens = 128000 +supports_vision = false +supports_tools = true + +[model.pricing] +input_price_per_token = 0.00000014 # $0.14 per million input tokens +output_price_per_token = 0.00000028 # $0.28 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true \ No newline at end of file diff --git a/providers/burncloud/models/deepseek-r1.toml b/providers/burncloud/models/deepseek-r1.toml new file mode 100644 index 00000000..1e8c12bc --- /dev/null +++ b/providers/burncloud/models/deepseek-r1.toml @@ -0,0 +1,20 @@ +[model] +name = "DeepSeek R1" +provider = "burncloud" +model_id = "deepseek-r1" +type = "chat" +description = "DeepSeek R1 model, advanced reasoning capabilities" + +[model.context] +max_tokens = 128000 +supports_vision = false +supports_tools = true + +[model.pricing] +input_price_per_token = 0.00000055 # $0.55 per million input tokens +output_price_per_token = 0.00000219 # $2.19 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true \ No newline at end of file diff --git a/providers/burncloud/models/deepseek-v3.toml 
b/providers/burncloud/models/deepseek-v3.toml new file mode 100644 index 00000000..905faf87 --- /dev/null +++ b/providers/burncloud/models/deepseek-v3.toml @@ -0,0 +1,20 @@ +[model] +name = "DeepSeek V3" +provider = "burncloud" +model_id = "deepseek-v3" +type = "chat" +description = "DeepSeek V3 model, latest generation with improved performance" + +[model.context] +max_tokens = 128000 +supports_vision = false +supports_tools = true + +[model.pricing] +input_price_per_token = 0.00000014 # $0.14 per million input tokens +output_price_per_token = 0.00000028 # $0.28 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true \ No newline at end of file diff --git a/providers/burncloud/models/gemini-2.0-flash.toml b/providers/burncloud/models/gemini-2.0-flash.toml new file mode 100644 index 00000000..75021385 --- /dev/null +++ b/providers/burncloud/models/gemini-2.0-flash.toml @@ -0,0 +1,21 @@ +[model] +name = "Gemini 2.0 Flash" +provider = "burncloud" +model_id = "gemini-2.0-flash" +type = "chat" +description = "Gemini 2.0 Flash model, fast multimodal capabilities" + +[model.context] +max_tokens = 1048576 # 1M tokens +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000000075 # $0.075 per million input tokens +output_price_per_token = 0.0000003 # $0.30 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +image_generation = true \ No newline at end of file diff --git a/providers/burncloud/models/gemini-2.5-flash.toml b/providers/burncloud/models/gemini-2.5-flash.toml new file mode 100644 index 00000000..b2d64ee0 --- /dev/null +++ b/providers/burncloud/models/gemini-2.5-flash.toml @@ -0,0 +1,21 @@ +[model] +name = "Gemini 2.5 Flash" +provider = "burncloud" +model_id = "gemini-2.5-flash" +type = "chat" +description = "Gemini 2.5 Flash model, enhanced speed and capabilities" + +[model.context] +max_tokens = 1048576 # 1M 
tokens +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000000075 # $0.075 per million input tokens +output_price_per_token = 0.0000003 # $0.30 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +image_generation = true \ No newline at end of file diff --git a/providers/burncloud/models/gemini-2.5-pro.toml b/providers/burncloud/models/gemini-2.5-pro.toml new file mode 100644 index 00000000..8ea86d33 --- /dev/null +++ b/providers/burncloud/models/gemini-2.5-pro.toml @@ -0,0 +1,21 @@ +[model] +name = "Gemini 2.5 Pro" +provider = "burncloud" +model_id = "gemini-2.5-pro" +type = "chat" +description = "Gemini 2.5 Pro model, premium performance for complex tasks" + +[model.context] +max_tokens = 2097152 # 2M tokens +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.00000125 # $1.25 per million input tokens +output_price_per_token = 0.000005 # $5.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +image_generation = true \ No newline at end of file diff --git a/providers/burncloud/models/gpt-4o-mini.toml b/providers/burncloud/models/gpt-4o-mini.toml new file mode 100644 index 00000000..118a82c7 --- /dev/null +++ b/providers/burncloud/models/gpt-4o-mini.toml @@ -0,0 +1,21 @@ +[model] +name = "GPT-4o Mini" +provider = "burncloud" +model_id = "gpt-4o-mini" +type = "chat" +description = "GPT-4o Mini model, affordable and efficient for most tasks" + +[model.context] +max_tokens = 128000 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.00000015 # $0.15 per million input tokens +output_price_per_token = 0.0000006 # $0.60 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +image_vision = true \ No newline at end of file diff --git a/providers/burncloud/models/gpt-4o.toml 
b/providers/burncloud/models/gpt-4o.toml new file mode 100644 index 00000000..30c3c604 --- /dev/null +++ b/providers/burncloud/models/gpt-4o.toml @@ -0,0 +1,21 @@ +[model] +name = "GPT-4o" +provider = "burncloud" +model_id = "gpt-4o" +type = "chat" +description = "GPT-4o model, flagship multimodal model with vision capabilities" + +[model.context] +max_tokens = 128000 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.0000025 # $2.50 per million input tokens +output_price_per_token = 0.00001 # $10.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +image_vision = true \ No newline at end of file diff --git a/providers/burncloud/models/gpt-5-mini.toml b/providers/burncloud/models/gpt-5-mini.toml new file mode 100644 index 00000000..178da207 --- /dev/null +++ b/providers/burncloud/models/gpt-5-mini.toml @@ -0,0 +1,21 @@ +[model] +name = "GPT-5 Mini" +provider = "burncloud" +model_id = "gpt-5-mini" +type = "chat" +description = "GPT-5 Mini model, efficient next-generation model" + +[model.context] +max_tokens = 128000 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.00000015 # $0.15 per million input tokens +output_price_per_token = 0.0000006 # $0.60 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +image_vision = true \ No newline at end of file diff --git a/providers/burncloud/models/gpt-5.toml b/providers/burncloud/models/gpt-5.toml new file mode 100644 index 00000000..fca0280b --- /dev/null +++ b/providers/burncloud/models/gpt-5.toml @@ -0,0 +1,21 @@ +[model] +name = "GPT-5" +provider = "burncloud" +model_id = "gpt-5" +type = "chat" +description = "GPT-5 model, next-generation flagship model with enhanced capabilities" + +[model.context] +max_tokens = 128000 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000003 
# $3.00 per million input tokens +output_price_per_token = 0.000015 # $15.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +image_vision = true \ No newline at end of file diff --git a/providers/burncloud/models/gpt-image-1.toml b/providers/burncloud/models/gpt-image-1.toml new file mode 100644 index 00000000..6d055692 --- /dev/null +++ b/providers/burncloud/models/gpt-image-1.toml @@ -0,0 +1,21 @@ +[model] +name = "GPT Image 1" +provider = "burncloud" +model_id = "gpt-image-1" +type = "image_generation" +description = "GPT Image 1 model for high-quality image generation" + +[model.context] +max_tokens = 0 +supports_vision = false +supports_tools = false + +[model.pricing] +input_price_per_token = 0.00004 # $40.00 per million tokens; NOTE(review): confirm billing unit (key is per-token; original label said "per image") + +[model.capabilities] +streaming = false +json_mode = false +function_calling = false +image_generation = true +supported_sizes = ["1024x1024", "1792x1024", "1024x1792"] \ No newline at end of file diff --git a/providers/burncloud/models/grok-2.toml b/providers/burncloud/models/grok-2.toml new file mode 100644 index 00000000..6119d09b --- /dev/null +++ b/providers/burncloud/models/grok-2.toml @@ -0,0 +1,21 @@ +[model] +name = "Grok 2" +provider = "burncloud" +model_id = "grok-2" +type = "chat" +description = "Grok 2 model by xAI, witty and knowledgeable conversational AI" + +[model.context] +max_tokens = 131072 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000002 # $2.00 per million input tokens +output_price_per_token = 0.00001 # $10.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +image_vision = true \ No newline at end of file diff --git a/providers/burncloud/models/grok-3.toml b/providers/burncloud/models/grok-3.toml new file mode 100644 index 00000000..4dc2f54f --- /dev/null +++ b/providers/burncloud/models/grok-3.toml @@ -0,0 +1,21 @@ +[model]
+name = "Grok 3" +provider = "burncloud" +model_id = "grok-3" +type = "chat" +description = "Grok 3 model, advanced reasoning and knowledge capabilities" + +[model.context] +max_tokens = 131072 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000003 # $3.00 per million input tokens +output_price_per_token = 0.000015 # $15.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +image_vision = true \ No newline at end of file diff --git a/providers/burncloud/models/grok-4.toml b/providers/burncloud/models/grok-4.toml new file mode 100644 index 00000000..9bfcc6fa --- /dev/null +++ b/providers/burncloud/models/grok-4.toml @@ -0,0 +1,21 @@ +[model] +name = "Grok 4" +provider = "burncloud" +model_id = "grok-4" +type = "chat" +description = "Grok 4 model, latest generation with enhanced performance" + +[model.context] +max_tokens = 131072 +supports_vision = true +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000003 # $3.00 per million input tokens +output_price_per_token = 0.000015 # $15.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +image_vision = true \ No newline at end of file diff --git a/providers/burncloud/models/o1-mini.toml b/providers/burncloud/models/o1-mini.toml new file mode 100644 index 00000000..7bd74c4a --- /dev/null +++ b/providers/burncloud/models/o1-mini.toml @@ -0,0 +1,21 @@ +[model] +name = "OpenAI o1 Mini" +provider = "burncloud" +model_id = "o1-mini" +type = "chat" +description = "OpenAI o1 Mini model, efficient reasoning for coding and math" + +[model.context] +max_tokens = 128000 +supports_vision = false +supports_tools = false + +[model.pricing] +input_price_per_token = 0.000003 # $3.00 per million input tokens +output_price_per_token = 0.000012 # $12.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = false +function_calling 
= false +reasoning = true \ No newline at end of file diff --git a/providers/burncloud/models/o1.toml b/providers/burncloud/models/o1.toml new file mode 100644 index 00000000..d6140e8c --- /dev/null +++ b/providers/burncloud/models/o1.toml @@ -0,0 +1,21 @@ +[model] +name = "OpenAI o1" +provider = "burncloud" +model_id = "o1" +type = "chat" +description = "OpenAI o1 model, optimized for complex reasoning tasks" + +[model.context] +max_tokens = 128000 +supports_vision = false +supports_tools = false + +[model.pricing] +input_price_per_token = 0.000015 # $15.00 per million input tokens +output_price_per_token = 0.00006 # $60.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = false +function_calling = false +reasoning = true \ No newline at end of file diff --git a/providers/burncloud/models/o3-mini.toml b/providers/burncloud/models/o3-mini.toml new file mode 100644 index 00000000..e903a5e2 --- /dev/null +++ b/providers/burncloud/models/o3-mini.toml @@ -0,0 +1,21 @@ +[model] +name = "OpenAI o3 Mini" +provider = "burncloud" +model_id = "o3-mini" +type = "chat" +description = "OpenAI o3 Mini model, efficient advanced reasoning" + +[model.context] +max_tokens = 200000 +supports_vision = false +supports_tools = false + +[model.pricing] +input_price_per_token = 0.000015 # $15.00 per million input tokens +output_price_per_token = 0.00006 # $60.00 per million output tokens + +[model.capabilities] +streaming = false +json_mode = false +function_calling = false +reasoning = true \ No newline at end of file diff --git a/providers/burncloud/models/o3.toml b/providers/burncloud/models/o3.toml new file mode 100644 index 00000000..e3e07c59 --- /dev/null +++ b/providers/burncloud/models/o3.toml @@ -0,0 +1,21 @@ +[model] +name = "OpenAI o3" +provider = "burncloud" +model_id = "o3" +type = "chat" +description = "OpenAI o3 model, advanced reasoning capabilities" + +[model.context] +max_tokens = 200000 +supports_vision = false +supports_tools = false + 
+[model.pricing] +input_price_per_token = 0.00006 # $60.00 per million input tokens +output_price_per_token = 0.00024 # $240.00 per million output tokens + +[model.capabilities] +streaming = false +json_mode = false +function_calling = false +reasoning = true \ No newline at end of file diff --git a/providers/burncloud/models/qwen3-235b-a22b.toml b/providers/burncloud/models/qwen3-235b-a22b.toml new file mode 100644 index 00000000..541617f5 --- /dev/null +++ b/providers/burncloud/models/qwen3-235b-a22b.toml @@ -0,0 +1,20 @@ +[model] +name = "Qwen3 235B A22B" +provider = "burncloud" +model_id = "qwen3-235b-a22b" +type = "chat" +description = "Qwen3 235B A22B model, large-scale language model with extensive knowledge" + +[model.context] +max_tokens = 32768 +supports_vision = false +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000001 # $1.00 per million input tokens +output_price_per_token = 0.000002 # $2.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true \ No newline at end of file diff --git a/providers/burncloud/models/qwen3-coder-480b-a35b-instruct.toml b/providers/burncloud/models/qwen3-coder-480b-a35b-instruct.toml new file mode 100644 index 00000000..002e0c0d --- /dev/null +++ b/providers/burncloud/models/qwen3-coder-480b-a35b-instruct.toml @@ -0,0 +1,21 @@ +[model] +name = "Qwen3 Coder 480B A35B Instruct" +provider = "burncloud" +model_id = "qwen3-coder-480b-a35b-instruct" +type = "chat" +description = "Qwen3 Coder 480B A35B Instruct model, specialized for coding tasks" + +[model.context] +max_tokens = 32768 +supports_vision = false +supports_tools = true + +[model.pricing] +input_price_per_token = 0.000002 # $2.00 per million input tokens +output_price_per_token = 0.000006 # $6.00 per million output tokens + +[model.capabilities] +streaming = true +json_mode = true +function_calling = true +code_generation = true \ No newline at end of file diff --git 
a/providers/burncloud/models/sora-2.toml b/providers/burncloud/models/sora-2.toml new file mode 100644 index 00000000..49907b61 --- /dev/null +++ b/providers/burncloud/models/sora-2.toml @@ -0,0 +1,22 @@ +[model] +name = "Sora 2" +provider = "burncloud" +model_id = "sora-2" +type = "video_generation" +description = "Sora 2 model for advanced video generation from text prompts" + +[model.context] +max_tokens = 0 +supports_vision = false +supports_tools = false + +[model.pricing] +input_price_per_token = 0.000002 # $2.00 per million tokens; NOTE(review): confirm billing unit (key is per-token; original label said "per video generation") + +[model.capabilities] +streaming = false +json_mode = false +function_calling = false +video_generation = true +supported_durations = [5, 10, 20] # seconds +supported_resolutions = ["1280x720", "1920x1080"] \ No newline at end of file diff --git a/providers/burncloud/models/text-embedding-3-large.toml b/providers/burncloud/models/text-embedding-3-large.toml new file mode 100644 index 00000000..424ce37d --- /dev/null +++ b/providers/burncloud/models/text-embedding-3-large.toml @@ -0,0 +1,20 @@ +[model] +name = "Text Embedding 3 Large" +provider = "burncloud" +model_id = "text-embedding-3-large" +type = "embeddings" +description = "Large text embedding model for high-quality vector representations" + +[model.context] +max_tokens = 8191 +supports_vision = false +supports_tools = false + +[model.pricing] +input_price_per_token = 0.00000013 # $0.13 per million input tokens + +[model.capabilities] +streaming = false +json_mode = false +function_calling = false +embedding_dimensions = 3072 \ No newline at end of file diff --git a/providers/burncloud/models/text-embedding-3-small.toml b/providers/burncloud/models/text-embedding-3-small.toml new file mode 100644 index 00000000..73a55174 --- /dev/null +++ b/providers/burncloud/models/text-embedding-3-small.toml @@ -0,0 +1,20 @@ +[model] +name = "Text Embedding 3 Small" +provider = "burncloud" +model_id = "text-embedding-3-small" +type = "embeddings" +description = "Small text embedding
model for efficient vector representations" + +[model.context] +max_tokens = 8191 +supports_vision = false +supports_tools = false + +[model.pricing] +input_price_per_token = 0.00000002 # $0.02 per million input tokens + +[model.capabilities] +streaming = false +json_mode = false +function_calling = false +embedding_dimensions = 1536 \ No newline at end of file diff --git a/providers/burncloud/models/tts-1.toml b/providers/burncloud/models/tts-1.toml new file mode 100644 index 00000000..c896a8d6 --- /dev/null +++ b/providers/burncloud/models/tts-1.toml @@ -0,0 +1,22 @@ +[model] +name = "TTS 1" +provider = "burncloud" +model_id = "tts-1" +type = "audio_speech" +description = "TTS 1 model for text-to-speech generation" + +[model.context] +max_tokens = 0 +supports_vision = false +supports_tools = false + +[model.pricing] +input_price_per_token = 0.000015 # $15.00 per million characters + +[model.capabilities] +streaming = true +json_mode = false +function_calling = false +audio_speech = true +supported_voices = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] +output_format = "mp3" \ No newline at end of file diff --git a/providers/burncloud/models/whisper-1.toml b/providers/burncloud/models/whisper-1.toml new file mode 100644 index 00000000..7c1c9112 --- /dev/null +++ b/providers/burncloud/models/whisper-1.toml @@ -0,0 +1,22 @@ +[model] +name = "Whisper 1" +provider = "burncloud" +model_id = "whisper-1" +type = "audio_transcription" +description = "Whisper 1 model for high-quality audio transcription" + +[model.context] +max_tokens = 0 +supports_vision = false +supports_tools = false + +[model.pricing] +input_price_per_token = 0.000006 # $6.00 per minute + +[model.capabilities] +streaming = false +json_mode = false +function_calling = false +audio_transcription = true +supported_formats = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"] +supported_languages = ["en", "zh", "de", "es", "fr", "ja", "ru"] \ No newline at end of file diff --git 
a/providers/burncloud/provider.toml b/providers/burncloud/provider.toml new file mode 100644 index 00000000..f13e038e --- /dev/null +++ b/providers/burncloud/provider.toml @@ -0,0 +1,21 @@ +[provider] +name = "BurnCloud" +base_url = "https://ai.burncloud.com" +logo = "logo.svg" +description = "BurnCloud provides access to various AI models including Claude, DeepSeek, Gemini, GPT, Grok, and more." + +[provider.auth] +type = "api_key" +header_name = "Authorization" +header_format = "Bearer {api_key}" + +[provider.endpoints] +chat = "/v1/chat/completions" +embeddings = "/v1/embeddings" +image_generation = "/v1/images/generations" +audio_transcription = "/v1/audio/transcriptions" +audio_speech = "/v1/audio/speech" +video_generation = "/v1/videos/generations" + +[provider.ratelimits] +default = "60/1m" \ No newline at end of file