From 9d7bef30c3aa3da3d4b69f3c9974c540f3f2603d Mon Sep 17 00:00:00 2001 From: Justin Yoo Date: Fri, 19 Sep 2025 15:44:38 +0900 Subject: [PATCH 1/4] Add image generation code sample --- .../responses/generate-edit-images.cs | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 docs/guides/images-vision/responses/generate-edit-images.cs diff --git a/docs/guides/images-vision/responses/generate-edit-images.cs b/docs/guides/images-vision/responses/generate-edit-images.cs new file mode 100644 index 000000000..d350f94a2 --- /dev/null +++ b/docs/guides/images-vision/responses/generate-edit-images.cs @@ -0,0 +1,34 @@ +// SAMPLE: Generate images with Responses API +// PAGE: https://platform.openai.com/docs/guides/images-vision?api-mode=responses#generate-or-edit-images +// GUIDANCE: Instructions to run this code: https://aka.ms/oai/net/start +#pragma warning disable OPENAI001 + +#:package OpenAI@2.* +#:property PublishAot=false + +using System.ClientModel; +using System.Text.Json; +using OpenAI.Responses; + +string key = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!; +OpenAIResponseClient client = new(model: "gpt-image-1", apiKey: key); + +OpenAIResponse response = (OpenAIResponse)client.CreateResponse( + BinaryContent.Create(BinaryData.FromObjectAsJson(new + { + model = "gpt-4.1", + input = "Generate an image of gray tabby cat hugging an otter with an orange scarf", + tools = new[] + { + new + { + type = "image_generation" + } + } + } + )) +); + +var serialised = JsonSerializer.Serialize(response.OutputItems); + +Console.WriteLine(serialised); \ No newline at end of file From 71fff7a731988cb601b4dbb52f6567231788e044 Mon Sep 17 00:00:00 2001 From: Justin Yoo Date: Mon, 22 Sep 2025 13:34:28 +0900 Subject: [PATCH 2/4] Add code samples for the "images and vision" page --- .../analyze_images_passing_base64_string.cs | 50 +++++++++++++++++++ .../responses/analyze_images_passing_file.cs | 30 +++++++++++ .../responses/analyze_images_passing_url.cs | 
21 ++++++++ .../responses/generate-edit-images.cs | 2 +- 4 files changed, 102 insertions(+), 1 deletion(-) create mode 100644 docs/guides/images-vision/responses/analyze_images_passing_base64_string.cs create mode 100644 docs/guides/images-vision/responses/analyze_images_passing_file.cs create mode 100644 docs/guides/images-vision/responses/analyze_images_passing_url.cs diff --git a/docs/guides/images-vision/responses/analyze_images_passing_base64_string.cs b/docs/guides/images-vision/responses/analyze_images_passing_base64_string.cs new file mode 100644 index 000000000..003bd9dda --- /dev/null +++ b/docs/guides/images-vision/responses/analyze_images_passing_base64_string.cs @@ -0,0 +1,50 @@ +// SAMPLE: Analyzes image by passing a base64-encoded image through Responses API +// PAGE: https://platform.openai.com/docs/guides/images-vision?api-mode=responses&format=base64-encoded#analyze-images +// GUIDANCE: Instructions to run this code: https://aka.ms/oai/net/start +#pragma warning disable OPENAI001 + +#:package OpenAI@2.* +#:property PublishAot=false + +using OpenAI.Responses; + +string key = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!; +OpenAIResponseClient client = new(model: "gpt-5", apiKey: key); + +using var http = new HttpClient(); + +// Download an image as stream +using var stream = await http.GetStreamAsync("https://openai-documentation.vercel.app/images/cat_and_otter.png"); + +OpenAIResponse response1 = (OpenAIResponse)client.CreateResponse([ + ResponseItem.CreateUserMessageItem([ + ResponseContentPart.CreateInputTextPart("What is in this image?"), + ResponseContentPart.CreateInputImagePart(BinaryData.FromStream(stream), "image/png") + ]) +]); + +Console.WriteLine($"From image stream: {response1.GetOutputText()}"); + +// Download an image as byte array +byte[] bytes = await http.GetByteArrayAsync("https://openai-documentation.vercel.app/images/cat_and_otter.png"); + +OpenAIResponse response2 = (OpenAIResponse)client.CreateResponse([ + 
ResponseItem.CreateUserMessageItem([ + ResponseContentPart.CreateInputTextPart("What is in this image?"), + ResponseContentPart.CreateInputImagePart(BinaryData.FromBytes(bytes), "image/png") + ]) +]); + +Console.WriteLine($"From byte array: {response2.GetOutputText()}"); + +// Convert the byte array to a base64 string +string base64 = Convert.ToBase64String(bytes); + +OpenAIResponse response3 = (OpenAIResponse)client.CreateResponse([ + ResponseItem.CreateUserMessageItem([ + ResponseContentPart.CreateInputTextPart("What is in this image?"), + ResponseContentPart.CreateInputImagePart(BinaryData.FromString(base64), "image/png") + ]) +]); + +Console.WriteLine($"From base64 string: {response3.GetOutputText()}"); \ No newline at end of file diff --git a/docs/guides/images-vision/responses/analyze_images_passing_file.cs b/docs/guides/images-vision/responses/analyze_images_passing_file.cs new file mode 100644 index 000000000..6ade80de0 --- /dev/null +++ b/docs/guides/images-vision/responses/analyze_images_passing_file.cs @@ -0,0 +1,30 @@ +// SAMPLE: Analyzes file from a file upload through Responses API +// PAGE: https://platform.openai.com/docs/guides/images-vision?api-mode=responses&format=file#analyze-images +// GUIDANCE: Instructions to run this code: https://aka.ms/oai/net/start +#pragma warning disable OPENAI001 + +#:package OpenAI@2.* +#:property PublishAot=false + +using OpenAI.Files; +using OpenAI.Responses; + +string key = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!; +OpenAIResponseClient client = new(model: "gpt-5", apiKey: key); + +using var http = new HttpClient(); + +// Download an image as stream +using var stream = await http.GetStreamAsync("https://openai-documentation.vercel.app/images/cat_and_otter.png"); + +OpenAIFileClient files = new(key); +OpenAIFile file = await files.UploadFileAsync(BinaryData.FromStream(stream), "cat_and_otter.png", FileUploadPurpose.Vision); + +OpenAIResponse response = (OpenAIResponse)client.CreateResponse([ + 
ResponseItem.CreateUserMessageItem([ + ResponseContentPart.CreateInputFilePart(file.Id), + ResponseContentPart.CreateInputTextPart("what's in this image?") + ]) +]); + +Console.WriteLine(response.GetOutputText()); \ No newline at end of file diff --git a/docs/guides/images-vision/responses/analyze_images_passing_url.cs b/docs/guides/images-vision/responses/analyze_images_passing_url.cs new file mode 100644 index 000000000..16bc4d9f5 --- /dev/null +++ b/docs/guides/images-vision/responses/analyze_images_passing_url.cs @@ -0,0 +1,21 @@ +// SAMPLE: Analyzes image by passing an image URL through Responses API +// PAGE: https://platform.openai.com/docs/guides/images-vision?api-mode=responses&format=url#analyze-images +// GUIDANCE: Instructions to run this code: https://aka.ms/oai/net/start +#pragma warning disable OPENAI001 + +#:package OpenAI@2.* +#:property PublishAot=false + +using OpenAI.Responses; + +string key = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!; +OpenAIResponseClient client = new(model: "gpt-5", apiKey: key); + +OpenAIResponse response = (OpenAIResponse)client.CreateResponse([ + ResponseItem.CreateUserMessageItem([ + ResponseContentPart.CreateInputTextPart("What is in this image?"), + ResponseContentPart.CreateInputImagePart(new Uri("https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg")) + ]) +]); + +Console.WriteLine(response.GetOutputText()); \ No newline at end of file diff --git a/docs/guides/images-vision/responses/generate-edit-images.cs b/docs/guides/images-vision/responses/generate-edit-images.cs index d350f94a2..a9059d7d1 100644 --- a/docs/guides/images-vision/responses/generate-edit-images.cs +++ b/docs/guides/images-vision/responses/generate-edit-images.cs @@ -16,7 +16,7 @@ OpenAIResponse response = (OpenAIResponse)client.CreateResponse( BinaryContent.Create(BinaryData.FromObjectAsJson(new { - model = "gpt-4.1", + model = 
"gpt-5", input = "Generate an image of gray tabby cat hugging an otter with an orange scarf", tools = new[] { From 5aaabaeb94e60ec61804c16fbbe06761ac074e2e Mon Sep 17 00:00:00 2001 From: Justin Yoo Date: Mon, 29 Sep 2025 18:29:23 +0900 Subject: [PATCH 3/4] Update sample codes for images/vision --- .../analyze_images_passing_base64_string.cs | 19 +++-------- .../responses/analyze_images_passing_file.cs | 8 +++-- .../responses/analyze_images_passing_url.cs | 4 ++- .../responses/generate-edit-images.cs | 34 ------------------- 4 files changed, 12 insertions(+), 53 deletions(-) delete mode 100644 docs/guides/images-vision/responses/generate-edit-images.cs diff --git a/docs/guides/images-vision/responses/analyze_images_passing_base64_string.cs b/docs/guides/images-vision/responses/analyze_images_passing_base64_string.cs index 003bd9dda..1fd56f963 100644 --- a/docs/guides/images-vision/responses/analyze_images_passing_base64_string.cs +++ b/docs/guides/images-vision/responses/analyze_images_passing_base64_string.cs @@ -11,10 +11,11 @@ string key = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!; OpenAIResponseClient client = new(model: "gpt-5", apiKey: key); -using var http = new HttpClient(); +Uri imageUrl = new("https://openai-documentation.vercel.app/images/cat_and_otter.png"); +using HttpClient http = new(); // Download an image as stream -using var stream = await http.GetStreamAsync("https://openai-documentation.vercel.app/images/cat_and_otter.png"); +using var stream = await http.GetStreamAsync(imageUrl); OpenAIResponse response1 = (OpenAIResponse)client.CreateResponse([ ResponseItem.CreateUserMessageItem([ @@ -26,7 +27,7 @@ Console.WriteLine($"From image stream: {response1.GetOutputText()}"); // Download an image as byte array -byte[] bytes = await http.GetByteArrayAsync("https://openai-documentation.vercel.app/images/cat_and_otter.png"); +byte[] bytes = await http.GetByteArrayAsync(imageUrl); OpenAIResponse response2 = 
(OpenAIResponse)client.CreateResponse([ ResponseItem.CreateUserMessageItem([ @@ -36,15 +37,3 @@ ]); Console.WriteLine($"From byte array: {response2.GetOutputText()}"); - -// Convert the byte array to a base64 string -string base64 = Convert.ToBase64String(bytes); - -OpenAIResponse response3 = (OpenAIResponse)client.CreateResponse([ - ResponseItem.CreateUserMessageItem([ - ResponseContentPart.CreateInputTextPart("What is in this image?"), - ResponseContentPart.CreateInputImagePart(BinaryData.FromString(base64), "image/png") - ]) -]); - -Console.WriteLine($"From base64 string: {response3.GetOutputText()}"); \ No newline at end of file diff --git a/docs/guides/images-vision/responses/analyze_images_passing_file.cs b/docs/guides/images-vision/responses/analyze_images_passing_file.cs index 6ade80de0..559db0f22 100644 --- a/docs/guides/images-vision/responses/analyze_images_passing_file.cs +++ b/docs/guides/images-vision/responses/analyze_images_passing_file.cs @@ -12,17 +12,19 @@ string key = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!; OpenAIResponseClient client = new(model: "gpt-5", apiKey: key); +string filename = "cat_and_otter.png"; +Uri imageUrl = new($"https://openai-documentation.vercel.app/images/{filename}"); using var http = new HttpClient(); // Download an image as stream -using var stream = await http.GetStreamAsync("https://openai-documentation.vercel.app/images/cat_and_otter.png"); +using var stream = await http.GetStreamAsync(imageUrl); OpenAIFileClient files = new(key); -OpenAIFile file = await files.UploadFileAsync(BinaryData.FromStream(stream), "cat_and_otter.png", FileUploadPurpose.Vision); +OpenAIFile file = await files.UploadFileAsync(BinaryData.FromStream(stream), filename, FileUploadPurpose.Vision); OpenAIResponse response = (OpenAIResponse)client.CreateResponse([ ResponseItem.CreateUserMessageItem([ - ResponseContentPart.CreateInputFilePart(file.Id), + ResponseContentPart.CreateInputImagePart(file.Id), 
ResponseContentPart.CreateInputTextPart("what's in this image?") ]) ]); diff --git a/docs/guides/images-vision/responses/analyze_images_passing_url.cs b/docs/guides/images-vision/responses/analyze_images_passing_url.cs index 16bc4d9f5..003da272d 100644 --- a/docs/guides/images-vision/responses/analyze_images_passing_url.cs +++ b/docs/guides/images-vision/responses/analyze_images_passing_url.cs @@ -11,10 +11,12 @@ string key = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!; OpenAIResponseClient client = new(model: "gpt-5", apiKey: key); +Uri imageUrl = new("https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"); + OpenAIResponse response = (OpenAIResponse)client.CreateResponse([ ResponseItem.CreateUserMessageItem([ ResponseContentPart.CreateInputTextPart("What is in this image?"), - ResponseContentPart.CreateInputImagePart(new Uri("https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg")) + ResponseContentPart.CreateInputImagePart(imageUrl) ]) ]); diff --git a/docs/guides/images-vision/responses/generate-edit-images.cs b/docs/guides/images-vision/responses/generate-edit-images.cs deleted file mode 100644 index a9059d7d1..000000000 --- a/docs/guides/images-vision/responses/generate-edit-images.cs +++ /dev/null @@ -1,34 +0,0 @@ -// SAMPLE: Generate images with Responses API -// PAGE: https://platform.openai.com/docs/guides/images-vision?api-mode=responses#generate-or-edit-images -// GUIDANCE: Instructions to run this code: https://aka.ms/oai/net/start -#pragma warning disable OPENAI001 - -#:package OpenAI@2.* -#:property PublishAot=false - -using System.ClientModel; -using System.Text.Json; -using OpenAI.Responses; - -string key = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!; -OpenAIResponseClient client = new(model: "gpt-image-1", apiKey: key); 
- -OpenAIResponse response = (OpenAIResponse)client.CreateResponse( - BinaryContent.Create(BinaryData.FromObjectAsJson(new - { - model = "gpt-5", - input = "Generate an image of gray tabby cat hugging an otter with an orange scarf", - tools = new[] - { - new - { - type = "image_generation" - } - } - } - )) -); - -var serialised = JsonSerializer.Serialize(response.OutputItems); - -Console.WriteLine(serialised); \ No newline at end of file From b7d097978bf5c545d71a85a0cd0fca9e523f9978 Mon Sep 17 00:00:00 2001 From: Justin Yoo Date: Wed, 1 Oct 2025 17:40:32 +0900 Subject: [PATCH 4/4] Change the order of prompts to make all the samples consistent --- .../images-vision/responses/analyze_images_passing_file.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/guides/images-vision/responses/analyze_images_passing_file.cs b/docs/guides/images-vision/responses/analyze_images_passing_file.cs index 559db0f22..0cb3fd622 100644 --- a/docs/guides/images-vision/responses/analyze_images_passing_file.cs +++ b/docs/guides/images-vision/responses/analyze_images_passing_file.cs @@ -24,8 +24,8 @@ OpenAIResponse response = (OpenAIResponse)client.CreateResponse([ ResponseItem.CreateUserMessageItem([ - ResponseContentPart.CreateInputImagePart(file.Id), - ResponseContentPart.CreateInputTextPart("what's in this image?") + ResponseContentPart.CreateInputTextPart("what's in this image?"), + ResponseContentPart.CreateInputImagePart(file.Id) ]) ]);