diff --git a/firebaseai/testapp/Assets/Firebase/Sample/FirebaseAI/UIHandlerAutomated.cs b/firebaseai/testapp/Assets/Firebase/Sample/FirebaseAI/UIHandlerAutomated.cs
index e31510ad..ba09496a 100644
--- a/firebaseai/testapp/Assets/Firebase/Sample/FirebaseAI/UIHandlerAutomated.cs
+++ b/firebaseai/testapp/Assets/Firebase/Sample/FirebaseAI/UIHandlerAutomated.cs
@@ -15,7 +15,8 @@
 // Uncomment to include logic to sign in to Auth as part of the tests
 //#define INCLUDE_FIREBASE_AUTH
-namespace Firebase.Sample.FirebaseAI {
+namespace Firebase.Sample.FirebaseAI
+{
   using Firebase;
   using Firebase.AI;
   using Firebase.Extensions;
@@ -34,7 +35,8 @@ namespace Firebase.Sample.FirebaseAI {
 #endif
   // An automated version of the UIHandler that runs tests on Firebase AI.
-  public class UIHandlerAutomated : UIHandler {
+  public class UIHandlerAutomated : UIHandler
+  {
     // Delegate which validates a completed task.
     delegate Task TaskValidationDelegate(Task task);
@@ -45,12 +47,14 @@ public class UIHandlerAutomated : UIHandler {
     // Not reusing the ones from the SDK, since they are internal and only visible
     // because we are providing source libraries.
-    private enum Backend {
+    private enum Backend
+    {
       GoogleAI,
       VertexAI,
     }
-    protected override void Start() {
+    protected override void Start()
+    {
       // Set of tests that use multiple backends.
       Func<Backend, Task>[] multiBackendTests = {
         TestCreateModel,
@@ -109,13 +113,16 @@ protected override void Start() {
       // Create the set of tests, combining the above lists.
       List<Func<Task>> tests = new();
       List<string> testNames = new();
-      foreach (Backend backend in Enum.GetValues(typeof(Backend))) {
-        foreach (var testMethod in multiBackendTests) {
+      foreach (Backend backend in Enum.GetValues(typeof(Backend)))
+      {
+        foreach (var testMethod in multiBackendTests)
+        {
           tests.Add(() => testMethod(backend));
           testNames.Add($"{testMethod.Method.Name}_{backend}");
         }
       }
-      foreach (var testMethod in singleTests) {
+      foreach (var testMethod in singleTests)
+      {
         tests.Add(testMethod);
         testNames.Add(testMethod.Method.Name);
       }
@@ -133,56 +140,71 @@ protected override void Start() {
     }
     // Passes along the update call to automated test runner.
-    protected override void Update() {
+    protected override void Update()
+    {
       base.Update();
-      if (testRunner != null) {
+      if (testRunner != null)
+      {
        testRunner.Update();
       }
     }
     // Throw when condition is false.
-    private void Assert(string message, bool condition) {
+    private void Assert(string message, bool condition)
+    {
       if (!condition)
+      {
         throw new Exception(
             $"Assertion failed ({testRunner.CurrentTestDescription}): {message}");
+      }
     }
     // Throw when value1 != value2.
-    private void AssertEq<T>(string message, T value1, T value2) {
-      if (!object.Equals(value1, value2)) {
+    private void AssertEq<T>(string message, T value1, T value2)
+    {
+      if (!object.Equals(value1, value2))
+      {
         throw new Exception(
             $"Assertion failed ({testRunner.CurrentTestDescription}): {value1} != {value2} ({message})");
       }
     }
     // Throw when the floats are not close enough in value to each other.
-    private void AssertFloatEq(string message, float value1, float value2) {
-      if (!(Math.Abs(value1 - value2) < 0.0001f)) {
+    private void AssertFloatEq(string message, float value1, float value2)
+    {
+      if (!(Math.Abs(value1 - value2) < 0.0001f))
+      {
         throw new Exception(
             $"Assertion failed ({testRunner.CurrentTestDescription}): {value1} !~= {value2} ({message})");
       }
     }
-    private void AssertType<T>(string message, object obj, out T output) {
-      if (obj is T parsed) {
+    private void AssertType<T>(string message, object obj, out T output)
+    {
+      if (obj is T parsed)
+      {
        output = parsed;
       }
-      else {
+      else
+      {
         throw new Exception(
             $"Assertion failed ({testRunner.CurrentTestDescription}): {obj.GetType()} is wrong type ({message})");
       }
     }
     // Returns true if the given value is between 0 and 1 (inclusive).
-    private bool ValidProbability(float value) {
+    private bool ValidProbability(float value)
+    {
       return value >= 0.0f && value <= 1.0f;
     }
     // The model name to use for the tests.
     private readonly string TestModelName = "gemini-2.0-flash";
-    private FirebaseAI GetFirebaseAI(Backend backend) {
-      return backend switch {
+    private FirebaseAI GetFirebaseAI(Backend backend)
+    {
+      return backend switch
+      {
         Backend.GoogleAI => FirebaseAI.GetInstance(FirebaseAI.Backend.GoogleAI()),
         Backend.VertexAI => FirebaseAI.GetInstance(FirebaseAI.Backend.VertexAI()),
         _ => throw new ArgumentOutOfRangeException(nameof(backend), backend,
@@ -191,19 +213,22 @@ private FirebaseAI GetFirebaseAI(Backend backend) {
     }
     // Get a basic version of the GenerativeModel to test against.
-    private GenerativeModel CreateGenerativeModel(Backend backend) {
+    private GenerativeModel CreateGenerativeModel(Backend backend)
+    {
       return GetFirebaseAI(backend).GetGenerativeModel(TestModelName);
     }
     // Test if it can create the GenerativeModel.
-    Task TestCreateModel(Backend backend) {
+    Task TestCreateModel(Backend backend)
+    {
       var model = CreateGenerativeModel(backend);
       Assert("Failed to create a GenerativeModel.", model != null);
       return Task.CompletedTask;
     }
     // Test if it can set a string in, and get a string output.
-    async Task TestBasicText(Backend backend) {
+    async Task TestBasicText(Backend backend)
+    {
       var model = CreateGenerativeModel(backend);
       GenerateContentResponse response = await model.GenerateContentAsync(
@@ -216,7 +241,8 @@ async Task TestBasicText(Backend backend) {
       Assert("Response text was missing", !string.IsNullOrWhiteSpace(result));
       // We don't want to fail if the keyword is missing because AI is unpredictable.
-      if (!response.Text.Contains("Firebase")) {
+      if (!response.Text.Contains("Firebase"))
+      {
         DebugLog("WARNING: Response string was missing the expected keyword 'Firebase': " +
             $"\n{result}");
       }
@@ -226,12 +252,14 @@ async Task TestBasicText(Backend backend) {
       // Ignoring PromptFeedback, too unpredictable if it will be present for this test.
-      if (response.UsageMetadata.HasValue) {
+      if (response.UsageMetadata.HasValue)
+      {
         Assert("Invalid CandidatesTokenCount", response.UsageMetadata?.CandidatesTokenCount > 0);
         Assert("Invalid PromptTokenCount", response.UsageMetadata?.PromptTokenCount > 0);
         Assert("Invalid TotalTokenCount", response.UsageMetadata?.TotalTokenCount > 0);
       }
-      else {
+      else
+      {
         DebugLog("WARNING: UsageMetadata was missing from BasicText");
       }
@@ -240,7 +268,8 @@ async Task TestBasicText(Backend backend) {
           candidate.FinishReason == FinishReason.Stop);
       // Test the SafetyRatings, if we got any.
-      foreach (SafetyRating safetyRating in candidate.SafetyRatings) {
+      foreach (SafetyRating safetyRating in candidate.SafetyRatings)
+      {
         string prefix = $"SafetyRating {safetyRating.Category}";
         Assert($"{prefix} claims it was blocked", !safetyRating.Blocked);
         Assert($"{prefix} has a Probability outside the expected range " +
@@ -251,22 +280,26 @@ async Task TestBasicText(Backend backend) {
             ValidProbability(safetyRating.SeverityScore));
         // They should be Negligible, but AI can be unpredictable, so just warn
-        if (safetyRating.Probability != SafetyRating.HarmProbability.Negligible) {
+        if (safetyRating.Probability != SafetyRating.HarmProbability.Negligible)
+        {
           DebugLog($"WARNING: {prefix} has a high probability: {safetyRating.Probability}");
         }
-        if (safetyRating.Severity != SafetyRating.HarmSeverity.Negligible) {
+        if (safetyRating.Severity != SafetyRating.HarmSeverity.Negligible)
+        {
           DebugLog($"WARNING: {prefix} has a high severity: {safetyRating.Severity}");
         }
       }
       // For such a basic text, we don't expect citation data, so warn.
-      if (candidate.CitationMetadata.HasValue) {
+      if (candidate.CitationMetadata.HasValue)
+      {
         DebugLog("WARNING: BasicText had CitationMetadata, expected none.");
       }
     }
     // Test if passing an Image and Text works.
-    async Task TestBasicImage(Backend backend) {
+    async Task TestBasicImage(Backend backend)
+    {
       var model = CreateGenerativeModel(backend);
       Assert("Missing RedBlueTexture", RedBlueTexture != null);
@@ -286,14 +319,16 @@ async Task TestBasicImage(Backend backend) {
       Assert("Response text was missing", !string.IsNullOrWhiteSpace(result));
       // We don't want to fail if the colors are missing/wrong because AI is unpredictable.
       if (!response.Text.Contains("red", StringComparison.OrdinalIgnoreCase) ||
-          !response.Text.Contains("blue", StringComparison.OrdinalIgnoreCase)) {
+          !response.Text.Contains("blue", StringComparison.OrdinalIgnoreCase))
+      {
         DebugLog("WARNING: Response string was missing the correct colors: " +
             $"\n{result}");
       }
     }
     // Test if passing in multiple model options works.
-    async Task TestModelOptions(Backend backend) {
+    async Task TestModelOptions(Backend backend)
+    {
       // Note that most of these settings are hard to reliably verify, so as
       // long as the call works we are generally happy.
       var model = GetFirebaseAI(backend).GetGenerativeModel(TestModelName,
@@ -329,13 +364,15 @@ async Task TestModelOptions(Backend backend) {
       // Assuming the GenerationConfig and SystemInstruction worked,
       // it should respond with just 'Apples' (though possibly with extra whitespace).
       // However, we only warn, because it isn't guaranteed.
-      if (result.Trim() != "Apples") {
+      if (result.Trim() != "Apples")
+      {
         DebugLog($"WARNING: Response text wasn't just 'Apples': {result}");
       }
     }
     // Test if requesting multiple candidates works.
-    async Task TestMultipleCandidates(Backend backend) {
+    async Task TestMultipleCandidates(Backend backend)
+    {
       var genConfig = new GenerationConfig(candidateCount: 2);
       var model = GetFirebaseAI(backend).GetGenerativeModel(TestModelName,
@@ -350,7 +387,8 @@ async Task TestMultipleCandidates(Backend backend) {
     }
     // Test if generating a stream of text works.
-    async Task TestBasicTextStream(Backend backend) {
+    async Task TestBasicTextStream(Backend backend)
+    {
       var model = CreateGenerativeModel(backend);
       string keyword = "Firebase";
@@ -362,16 +400,19 @@ async Task TestBasicTextStream(Backend backend) {
       string fullResult = "";
       // The FinishReason should only be set to stop at the end of the stream.
       bool finishReasonStop = false;
-      await foreach (GenerateContentResponse response in responseStream) {
+      await foreach (GenerateContentResponse response in responseStream)
+      {
         // Should only be receiving non-empty text responses, but only assert for null.
         string text = response.Text;
         Assert("Received null text from the stream.", text != null);
-        if (string.IsNullOrWhiteSpace(text)) {
+        if (string.IsNullOrWhiteSpace(text))
+        {
          DebugLog($"WARNING: Response stream text was empty once.");
         }
         Assert("Previous FinishReason was stop, but received more", !finishReasonStop);
-        if (response.Candidates.First().FinishReason == FinishReason.Stop) {
+        if (response.Candidates.First().FinishReason == FinishReason.Stop)
+        {
           finishReasonStop = true;
         }
@@ -381,7 +422,8 @@ async Task TestBasicTextStream(Backend backend) {
       Assert("Finished without seeing FinishReason.Stop", finishReasonStop);
       // We don't want to fail if the keyword is missing because AI is unpredictable.
-      if (!fullResult.Contains("Firebase")) {
+      if (!fullResult.Contains("Firebase"))
+      {
         DebugLog("WARNING: Response string was missing the expected keyword 'Firebase': " +
             $"\n{fullResult}");
       }
@@ -398,7 +440,8 @@ async Task TestBasicTextStream(Backend backend) {
     // Create a GenerativeModel using the parameters above to test Function Calling.
     private GenerativeModel CreateGenerativeModelWithBasicFunctionCall(
         Backend backend,
-        ToolConfig? toolConfig = null) {
+        ToolConfig? toolConfig = null)
+    {
       var tool = new Tool(new FunctionDeclaration(
           basicFunctionName, "A function used to test Function Calling.",
           new Dictionary<string, Schema>() {
@@ -417,7 +460,8 @@ private GenerativeModel CreateGenerativeModelWithBasicFunctionCall(
     }
     // Test if FunctionCalling works, using Any to force it.
-    async Task TestFunctionCallingAny(Backend backend) {
+    async Task TestFunctionCallingAny(Backend backend)
+    {
       // Setting this to Any should force my function call.
       var model = CreateGenerativeModelWithBasicFunctionCall(backend,
           new ToolConfig(FunctionCallingConfig.Any()));
@@ -447,7 +491,8 @@ async Task TestFunctionCallingAny(Backend backend) {
     }
     // Test if setting None will prevent Function Calling.
-    async Task TestFunctionCallingNone(Backend backend) {
+    async Task TestFunctionCallingNone(Backend backend)
+    {
       // Setting this to None should block my function call.
       var model = CreateGenerativeModelWithBasicFunctionCall(backend,
           new ToolConfig(FunctionCallingConfig.None()));
@@ -460,7 +505,8 @@ async Task TestFunctionCallingNone(Backend backend) {
     }
     // Test if setting a response schema with an enum works.
-    async Task TestEnumSchemaResponse(Backend backend) {
+    async Task TestEnumSchemaResponse(Backend backend)
+    {
       string enumValue = "MyTestEnum";
       var model = GetFirebaseAI(backend).GetGenerativeModel(TestModelName,
         generationConfig: new GenerationConfig(
@@ -474,7 +520,8 @@ async Task TestEnumSchemaResponse(Backend backend) {
     }
     // Test if setting a response schema with an enum works.
-    async Task TestAnyOfSchemaResponse(Backend backend) {
+    async Task TestAnyOfSchemaResponse(Backend backend)
+    {
       var model = GetFirebaseAI(backend).GetGenerativeModel(TestModelName,
         generationConfig: new GenerationConfig(
           responseMimeType: "application/json",
@@ -491,7 +538,8 @@ async Task TestAnyOfSchemaResponse(Backend backend) {
     }
     // Test grounding with Google Search.
-    async Task TestSearchGrounding(Backend backend) {
+    async Task TestSearchGrounding(Backend backend)
+    {
       // Use a model that supports grounding.
       var model = GetFirebaseAI(backend).GetGenerativeModel(TestModelName,
         tools: new Tool[] { new Tool(new GoogleSearch()) }
@@ -528,7 +576,8 @@ async Task TestSearchGrounding(Backend backend) {
     }
     // Test if when using Chat the model will get the previous messages.
-    async Task TestChatBasicTextNoHistory(Backend backend) {
+    async Task TestChatBasicTextNoHistory(Backend backend)
+    {
       var model = CreateGenerativeModel(backend);
       var chat = model.StartChat();
@@ -538,7 +587,8 @@ async Task TestChatBasicTextNoHistory(Backend backend) {
           "in all future responses?");
       Assert("First response was empty.", !string.IsNullOrWhiteSpace(response1.Text));
-      if (!response1.Text.Contains(keyword)) {
+      if (!response1.Text.Contains(keyword))
+      {
         DebugLog($"WARNING: First response string was missing the expected keyword '{keyword}': " +
             $"\n{response1.Text}");
       }
@@ -548,7 +598,8 @@ async Task TestChatBasicTextNoHistory(Backend backend) {
           "include the special word I told you before in it.");
       Assert("Second response was empty.", !string.IsNullOrWhiteSpace(response2.Text));
-      if (!response2.Text.Contains(keyword)) {
+      if (!response2.Text.Contains(keyword))
+      {
         DebugLog($"WARNING: Second response string was missing the expected keyword '{keyword}': " +
             $"\n{response2.Text}");
       }
@@ -557,7 +608,8 @@ async Task TestChatBasicTextNoHistory(Backend backend) {
     }
     // Test if when using Chat the model gets the initial starting history.
-    async Task TestChatBasicTextPriorHistory(Backend backend) {
+    async Task TestChatBasicTextPriorHistory(Backend backend)
+    {
       var model = CreateGenerativeModel(backend);
       string keyword = "Firebase";
       var chat = model.StartChat(
@@ -570,7 +622,8 @@ async Task TestChatBasicTextPriorHistory(Backend backend) {
           "with that special word?");
       Assert("Response was empty.", !string.IsNullOrWhiteSpace(response.Text));
-      if (!response.Text.Contains(keyword)) {
+      if (!response.Text.Contains(keyword))
+      {
         DebugLog($"WARNING: Response string was missing the expected keyword '{keyword}': " +
             $"\n{response.Text}");
       }
@@ -579,7 +632,8 @@ async Task TestChatBasicTextPriorHistory(Backend backend) {
     }
     // Test if when using Chat, the model handles Function Calling, and getting a response.
-    async Task TestChatFunctionCalling(Backend backend) {
+    async Task TestChatFunctionCalling(Backend backend)
+    {
       var tool = new Tool(new FunctionDeclaration(
           "GetKeyword", "Call to retrieve a special keyword.",
           new Dictionary<string, Schema>() {
@@ -605,7 +659,8 @@ async Task TestChatFunctionCalling(Backend backend) {
       AssertEq("Wrong number of Args", functionCall.Args.Count, 1);
       Assert($"Missing parameter", functionCall.Args.ContainsKey("input"));
       AssertType("Input parameter", functionCall.Args["input"], out string inputParameter);
-      if (inputParameter != expectedInput) {
+      if (inputParameter != expectedInput)
+      {
         DebugLog($"WARNING: Input parameter: {inputParameter} != {expectedInput}");
       }
@@ -617,7 +672,8 @@ async Task TestChatFunctionCalling(Backend backend) {
       // Second response should hopefully have the keyword as part of it.
       Assert("Second response was empty.", !string.IsNullOrWhiteSpace(response2.Text));
-      if (!response2.Text.Contains(keyword)) {
+      if (!response2.Text.Contains(keyword))
+      {
         DebugLog($"WARNING: Response string was missing the expected keyword '{keyword}': " +
             $"\n{response2.Text}");
       }
@@ -626,7 +682,8 @@ async Task TestChatFunctionCalling(Backend backend) {
     }
     // Test if Chat works with streaming a text result.
-    async Task TestChatBasicTextStream(Backend backend) {
+    async Task TestChatBasicTextStream(Backend backend)
+    {
       var model = CreateGenerativeModel(backend);
       string keyword = "Firebase";
@@ -644,16 +701,19 @@ async Task TestChatBasicTextStream(Backend backend) {
       // The FinishReason should only be set to stop at the end of the stream.
       bool finishReasonStop = false;
       int responseCount = 0;
-      await foreach (GenerateContentResponse response in responseStream) {
+      await foreach (GenerateContentResponse response in responseStream)
+      {
         // Should only be receiving non-empty text responses, but only assert for null.
         string text = response.Text;
         Assert("Received null text from the stream.", text != null);
-        if (string.IsNullOrWhiteSpace(text)) {
+        if (string.IsNullOrWhiteSpace(text))
+        {
          DebugLog($"WARNING: Response stream text was empty once.");
         }
         Assert("Previous FinishReason was stop, but received more", !finishReasonStop);
-        if (response.Candidates.First().FinishReason == FinishReason.Stop) {
+        if (response.Candidates.First().FinishReason == FinishReason.Stop)
+        {
           finishReasonStop = true;
         }
@@ -664,7 +724,8 @@ async Task TestChatBasicTextStream(Backend backend) {
       Assert("Finished without seeing FinishReason.Stop", finishReasonStop);
       // We don't want to fail if the keyword is missing because AI is unpredictable.
-      if (!fullResult.Contains(keyword)) {
+      if (!fullResult.Contains(keyword))
+      {
         DebugLog($"WARNING: Streaming response was missing the expected keyword '{keyword}': " +
             $"\n{fullResult}");
       }
@@ -677,7 +738,8 @@ async Task TestChatBasicTextStream(Backend backend) {
     }
     // Test if calling CountTokensAsync works as expected.
-    async Task TestCountTokens(Backend backend) {
+    async Task TestCountTokens(Backend backend)
+    {
       // Include some additional settings, since they are used in the call.
       var model = GetFirebaseAI(backend).GetGenerativeModel(TestModelName,
         generationConfig: new GenerationConfig(temperature: 0.8f),
@@ -695,7 +757,8 @@ async Task TestCountTokens(Backend backend) {
     }
     // Test being able to provide a Youtube link to the model.
-    async Task TestYoutubeLink(Backend backend) {
+    async Task TestYoutubeLink(Backend backend)
+    {
       var model = CreateGenerativeModel(backend);
       GenerateContentResponse response = await model.GenerateContentAsync(new ModelContent[] {
@@ -710,7 +773,8 @@ async Task TestYoutubeLink(Backend backend) {
     }
     // Test being able to generate an image with GenerateContent.
-    async Task TestGenerateImage(Backend backend) {
+    async Task TestGenerateImage(Backend backend)
+    {
       var model = GetFirebaseAI(backend).GetGenerativeModel("gemini-2.0-flash-exp",
         generationConfig: new GenerationConfig(
           responseModalities: new[] { ResponseModality.Text, ResponseModality.Image })
@@ -726,12 +790,16 @@ async Task TestGenerateImage(Backend backend) {
       bool foundText = false;
       bool foundImage = false;
       var candidate = response.Candidates.First();
-      foreach (var part in candidate.Content.Parts) {
-        if (part is ModelContent.TextPart) {
+      foreach (var part in candidate.Content.Parts)
+      {
+        if (part is ModelContent.TextPart)
+        {
          foundText = true;
         }
-        else if (part is ModelContent.InlineDataPart dataPart) {
-          if (dataPart.MimeType.Contains("image")) {
+        else if (part is ModelContent.InlineDataPart dataPart)
+        {
+          if (dataPart.MimeType.Contains("image"))
+          {
            foundImage = true;
          }
        }
@@ -740,7 +808,8 @@ async Task TestGenerateImage(Backend backend) {
     }
     // Test generating an image via Imagen.
-    async Task TestImagenGenerateImage(Backend backend) {
+    async Task TestImagenGenerateImage(Backend backend)
+    {
       var model = GetFirebaseAI(backend).GetImagenModel("imagen-3.0-generate-002");
       var response = await model.GenerateImagesAsync(
@@ -760,7 +829,8 @@ async Task TestImagenGenerateImage(Backend backend) {
     }
     // Test generating an image via Imagen with various options.
-    async Task TestImagenGenerateImageOptions(Backend backend) {
+    async Task TestImagenGenerateImageOptions(Backend backend)
+    {
       var model = GetFirebaseAI(backend).GetImagenModel(
         modelName: "imagen-3.0-generate-002",
         generationConfig: new ImagenGenerationConfig(
@@ -781,7 +851,8 @@ async Task TestImagenGenerateImageOptions(Backend backend) {
       AssertEq("FilteredReason", response.FilteredReason, null);
       AssertEq("Image Count", response.Images.Count, 2);
-      for (int i = 0; i < 2; i++) {
+      for (int i = 0; i < 2; i++)
+      {
         AssertEq($"Image {i} MimeType", response.Images[i].MimeType, "image/jpeg");
         var texture = response.Images[i].AsTexture2D();
@@ -794,7 +865,8 @@ async Task TestImagenGenerateImageOptions(Backend backend) {
     }
     // Test defining a thinking budget, and getting back thought tokens.
-    async Task TestThinkingBudget(Backend backend) {
+    async Task TestThinkingBudget(Backend backend)
+    {
       // Thinking Budget requires at least the 2.5 model.
       var model = GetFirebaseAI(backend).GetGenerativeModel(
         modelName: "gemini-2.5-flash",
@@ -821,7 +893,8 @@ async Task TestThinkingBudget(Backend backend) {
     }
     // Test requesting thought summaries.
-    async Task TestIncludeThoughts(Backend backend) {
+    async Task TestIncludeThoughts(Backend backend)
+    {
       // Thinking Budget requires at least the 2.5 model.
       var model = GetFirebaseAI(backend).GetGenerativeModel(
         modelName: "gemini-2.5-flash",
@@ -843,7 +916,8 @@ async Task TestIncludeThoughts(Backend backend) {
       Assert("ThoughtSummary was missing", !string.IsNullOrWhiteSpace(response.ThoughtSummary));
     }
-    async Task TestCodeExecution(Backend backend) {
+    async Task TestCodeExecution(Backend backend)
+    {
       var model = GetFirebaseAI(backend).GetGenerativeModel(
         modelName: TestModelName,
         tools: new Tool[] { new Tool(new CodeExecution()) }
@@ -863,8 +937,10 @@ async Task TestCodeExecution(Backend backend) {
       Assert("Missing CodeExecutionResultParts", codeExecutionResultParts.Any());
     }
-    async Task TestUrlContext(Backend backend) {
-      if (backend == Backend.GoogleAI) {
+    async Task TestUrlContext(Backend backend)
+    {
+      if (backend == Backend.GoogleAI)
+      {
         // TODO: Remove when the backend is more reliable
         // The Developer backend keeps raising issues with URL context, so disable for now
         return;
@@ -883,13 +959,15 @@ async Task TestUrlContext(Backend backend) {
       Assert("Response text was missing", !string.IsNullOrWhiteSpace(result));
       // We don't want to check on anything specific, but it should ideally have the metadata.
-      if (!response.Candidates.First().UrlContextMetadata.HasValue) {
+      if (!response.Candidates.First().UrlContextMetadata.HasValue)
+      {
         DebugLog("WARNING: Response did not have expected Url Context Metadata.");
       }
     }
     // Test providing a file from a GCS bucket (Firebase Storage) to the model.
-    async Task TestReadFile() {
+    async Task TestReadFile()
+    {
       // GCS is currently only supported with VertexAI.
       var model = CreateGenerativeModel(Backend.VertexAI);
@@ -906,7 +984,8 @@ async Task TestReadFile() {
     // Test providing a file requiring authentication from a GCS bucket (Firebase Storage) to the model.
     // Should pass if Auth is included or not.
     // To turn Auth on, define INCLUDE_FIREBASE_AUTH at the top of the file.
-    async Task TestReadSecureFile() {
+    async Task TestReadSecureFile()
+    {
       // GCS is currently only supported with VertexAI.
       var model = CreateGenerativeModel(Backend.VertexAI);
@@ -914,7 +993,8 @@ async Task TestReadSecureFile() {
       var authResult = await FirebaseAuth.DefaultInstance.SignInAnonymouslyAsync();
 #endif
-      try {
+      try
+      {
         GenerateContentResponse response = await model.GenerateContentAsync(new ModelContent[] {
           ModelContent.Text("I am testing File input. Can you describe the image in the attached file?"),
           ModelContent.FileData("image/png", new Uri($"gs://{FirebaseApp.DefaultInstance.Options.StorageBucket}/FCMImages/mushroom.png"))
@@ -928,11 +1008,13 @@ async Task TestReadSecureFile() {
             response.Text.Contains("mushroom", StringComparison.OrdinalIgnoreCase));
       }
 #if !INCLUDE_FIREBASE_AUTH
-      catch (HttpRequestException ex) {
+      catch (HttpRequestException ex)
+      {
         Assert("Missing Http Status Code 403", ex.Message.Contains("403"));
       }
 #endif
-      finally {
+      finally
+      {
 #if INCLUDE_FIREBASE_AUTH
         // Clean up the created user.
         await authResult.User.DeleteAsync();
@@ -945,14 +1027,18 @@
         "https://raw.githubusercontent.com/FirebaseExtended/vertexai-sdk-test-data/refs/heads/main/mock-responses/";
     readonly HttpClient httpClient = new();
-    private Task<string> LoadStreamingAsset(string fullPath) {
+    private Task<string> LoadStreamingAsset(string fullPath)
+    {
       TaskCompletionSource<string> tcs = new TaskCompletionSource<string>();
       UnityWebRequest request = UnityWebRequest.Get(fullPath);
-      request.SendWebRequest().completed += (_) => {
-        if (request.result == UnityWebRequest.Result.Success) {
+      request.SendWebRequest().completed += (_) =>
+      {
+        if (request.result == UnityWebRequest.Result.Success)
+        {
          tcs.SetResult(request.downloadHandler.text);
         }
-        else {
+        else
+        {
          tcs.SetResult(null);
         }
       };
@@ -960,19 +1046,23 @@ private Task<string> LoadStreamingAsset(string fullPath) {
     }
     // Gets the Json test data from the given filename, potentially downloading from a GitHub repo.
-    private async Task<Dictionary<string, object>> GetJsonTestData(string filename) {
+    private async Task<Dictionary<string, object>> GetJsonTestData(string filename)
+    {
       string jsonString = null;
       // First, try to load the file from StreamingAssets
       string localPath = Path.Combine(Application.streamingAssetsPath, "TestData", filename);
-      if (localPath.StartsWith("jar") || localPath.StartsWith("http")) {
+      if (localPath.StartsWith("jar") || localPath.StartsWith("http"))
+      {
         // Special case to access StreamingAsset content on Android
         jsonString = await LoadStreamingAsset(localPath);
       }
-      else if (File.Exists(localPath)) {
+      else if (File.Exists(localPath))
+      {
         jsonString = File.ReadAllText(localPath);
       }
-      if (string.IsNullOrEmpty(jsonString)) {
+      if (string.IsNullOrEmpty(jsonString))
+      {
         var response = await httpClient.SendAsync(new HttpRequestMessage(HttpMethod.Get, testDataUrl + filename));
         response.EnsureSuccessStatusCode();
@@ -982,16 +1072,19 @@ private async Task<Dictionary<string, object>> GetJsonTestData(string filename)
       return Json.Deserialize(jsonString) as Dictionary<string, object>;
     }
-    private Task<Dictionary<string, object>> GetVertexJsonTestData(string filename) {
+    private Task<Dictionary<string, object>> GetVertexJsonTestData(string filename)
+    {
       return GetJsonTestData($"vertexai/{filename}");
     }
-    private Task<Dictionary<string, object>> GetGoogleAIJsonTestData(string filename) {
+    private Task<Dictionary<string, object>> GetGoogleAIJsonTestData(string filename)
+    {
       return GetJsonTestData($"googleai/{filename}");
     }
     // Helper function to validate that the response has a TextPart as expected.
-    private void ValidateTextPart(GenerateContentResponse response, string expectedText) {
+    private void ValidateTextPart(GenerateContentResponse response, string expectedText)
+    {
       int candidateCount = response.Candidates.Count();
       AssertEq("Candidate count", candidateCount, 1);
@@ -1017,7 +1110,8 @@ private void ValidateSafetyRating(SafetyRating safetyRating,
         float probabilityScore = default,
         SafetyRating.HarmSeverity severity = default,
         float severityScore = default,
-        bool blocked = default) {
+        bool blocked = default)
+    {
       AssertEq($"SafetyRatings: {harmCategory} had incorrect category",
           safetyRating.Category, harmCategory);
       AssertEq($"SafetyRatings: {harmCategory} had incorrect probability",
@@ -1034,7 +1128,8 @@ private void ValidateSafetyRating(SafetyRating safetyRating,
     // Helper function to validate UsageMetadata.
     private void ValidateUsageMetadata(UsageMetadata? usageMetadata, int promptTokenCount,
-        int candidatesTokenCount, int thoughtsTokenCount, int toolUsePromptTokenCount, int totalTokenCount) {
+        int candidatesTokenCount, int thoughtsTokenCount, int toolUsePromptTokenCount, int totalTokenCount)
+    {
       Assert("UsageMetadata", usageMetadata.HasValue);
       AssertEq("Wrong PromptTokenCount",
           usageMetadata?.PromptTokenCount, promptTokenCount);
@@ -1055,7 +1150,8 @@ private void ValidateCitation(Citation citation,
         Uri uri = default,
         string title = default,
         string license = default,
-        DateTime? publicationDate = default) {
+        DateTime? publicationDate = default)
+    {
       AssertEq("Citation.StartIndex", citation.StartIndex, startIndex);
       AssertEq("Citation.EndIndex", citation.EndIndex, endIndex);
       AssertEq("Citation.Uri", citation.Uri, uri);
@@ -1066,7 +1162,8 @@ private void ValidateCitation(Citation citation,
     // Test that parsing a basic short reply works as expected.
     // https://github.com/FirebaseExtended/vertexai-sdk-test-data/blob/main/mock-responses/unary-success-basic-reply-short.json
-    async Task InternalTestBasicReplyShort() {
+    async Task InternalTestBasicReplyShort()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-success-basic-reply-short.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);
@@ -1093,7 +1190,8 @@ async Task InternalTestBasicReplyShort() {
     // Test that parsing a response including Citations works.
     // https://github.com/FirebaseExtended/vertexai-sdk-test-data/blob/main/mock-responses/unary-success-citations.json
-    async Task InternalTestCitations() {
+    async Task InternalTestCitations()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-success-citations.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);
@@ -1124,7 +1222,8 @@ async Task InternalTestCitations() {
     // Test that parsing a response that was blocked for Safety reasons works.
     // https://github.com/FirebaseExtended/vertexai-sdk-test-data/blob/main/mock-responses/unary-failure-prompt-blocked-safety-with-message.json
-    async Task InternalTestBlockedSafetyWithMessage() {
+    async Task InternalTestBlockedSafetyWithMessage()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-failure-prompt-blocked-safety-with-message.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);
@@ -1154,7 +1253,8 @@ async Task InternalTestBlockedSafetyWithMessage() {
     // Test that parsing a response that was blocked, and has no Content, works.
     // https://github.com/FirebaseExtended/vertexai-sdk-test-data/blob/main/mock-responses/unary-failure-finish-reason-safety-no-content.json
-    async Task InternalTestFinishReasonSafetyNoContent() {
+    async Task InternalTestFinishReasonSafetyNoContent()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-failure-finish-reason-safety-no-content.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);
@@ -1191,7 +1291,8 @@ async Task InternalTestFinishReasonSafetyNoContent() {
     // Test that parsing a response with unknown safety enums works.
     // https://github.com/FirebaseExtended/vertexai-sdk-test-data/blob/main/mock-responses/unary-success-unknown-enum-safety-ratings.json
-    async Task InternalTestUnknownEnumSafetyRatings() {
+    async Task InternalTestUnknownEnumSafetyRatings()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-success-unknown-enum-safety-ratings.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);
@@ -1224,7 +1325,8 @@ async Task InternalTestUnknownEnumSafetyRatings() {
     }
     // Test that parsing a response with a FunctionCall part works.
-    async Task InternalTestFunctionCallWithArguments() {
+    async Task InternalTestFunctionCallWithArguments()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-success-function-call-with-arguments.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);
@@ -1244,7 +1346,8 @@ async Task InternalTestFunctionCallWithArguments() {
     // Test that parsing a Vertex AI response with GroundingMetadata works.
     // https://github.com/FirebaseExtended/vertexai-sdk-test-data/blob/main/mock-responses/vertexai/unary-success-google-search-grounding.json
-    async Task InternalTestVertexAIGrounding() {
+    async Task InternalTestVertexAIGrounding()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-success-google-search-grounding.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);
@@ -1267,7 +1370,8 @@ async Task InternalTestVertexAIGrounding() {
     // Test that parsing a Google AI response with GroundingMetadata works.
     // https://github.com/FirebaseExtended/vertexai-sdk-test-data/blob/main/mock-responses/googleai/unary-success-google-search-grounding.json
-    async Task InternalTestGoogleAIGrounding() {
+    async Task InternalTestGoogleAIGrounding()
+    {
       Dictionary<string, object> json = await GetGoogleAIJsonTestData("unary-success-google-search-grounding.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.GoogleAI);
@@ -1305,7 +1409,8 @@ async Task InternalTestGoogleAIGrounding() {
     // Test that parsing a Google AI response with empty GroundingChunks works.
     // https://github.com/FirebaseExtended/vertexai-sdk-test-data/blob/main/mock-responses/googleai/unary-success-google-search-grounding-empty-grounding-chunks.json
-    async Task InternalTestGoogleAIGroundingEmptyChunks() {
+    async Task InternalTestGoogleAIGroundingEmptyChunks()
+    {
       Dictionary<string, object> json = await GetGoogleAIJsonTestData("unary-success-google-search-grounding-empty-grounding-chunks.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.GoogleAI);
@@ -1328,7 +1433,8 @@ async Task InternalTestGoogleAIGroundingEmptyChunks() {
     }
     // Test parsing an empty GroundingMetadata object.
-    Task InternalTestGroundingMetadata_Empty() {
+    Task InternalTestGroundingMetadata_Empty()
+    {
       var json = new Dictionary<string, object>();
       var grounding = GroundingMetadata.FromJson(json);
@@ -1341,7 +1447,8 @@ Task InternalTestGroundingMetadata_Empty() {
     }
     // Test parsing an empty Segment object.
-    Task InternalTestSegment_Empty() {
+    Task InternalTestSegment_Empty()
+    {
       var json = new Dictionary<string, object>();
       var segment = Segment.FromJson(json);
@@ -1354,7 +1461,8 @@ Task InternalTestSegment_Empty() {
     }
     // Test that parsing a count token response works.
-    async Task InternalTestCountTokenResponse() {
+    async Task InternalTestCountTokenResponse()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-success-detailed-token-response.json");
       CountTokensResponse response = CountTokensResponse.FromJson(json);
@@ -1371,7 +1479,8 @@ async Task InternalTestCountTokenResponse() {
     }
     // Test that the UsageMetadata is getting parsed correctly.
-    async Task InternalTestBasicResponseLongUsageMetadata() {
+    async Task InternalTestBasicResponseLongUsageMetadata()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-success-basic-response-long-usage-metadata.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);
@@ -1396,7 +1505,8 @@ async Task InternalTestBasicResponseLongUsageMetadata() {
     // Test that parsing a basic short reply from Google AI endpoint works as expected.
     // https://github.com/FirebaseExtended/vertexai-sdk-test-data/blob/main/mock-responses/googleai/unary-success-basic-reply-short.txt
-    async Task InternalTestGoogleAIBasicReplyShort() {
+    async Task InternalTestGoogleAIBasicReplyShort()
+    {
       Dictionary<string, object> json = await GetGoogleAIJsonTestData("unary-success-basic-reply-short.json");
       // GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.GoogleAI);
@@ -1422,7 +1532,8 @@ async Task InternalTestGoogleAIBasicReplyShort() {
     // Test parsing a Google AI format response with citations.
     // Based on: https://github.com/FirebaseExtended/vertexai-sdk-test-data/blob/main/mock-responses/googleai/unary-success-citations.txt
-    async Task InternalTestGoogleAICitations() {
+    async Task InternalTestGoogleAICitations()
+    {
       Dictionary<string, object> json = await GetGoogleAIJsonTestData("unary-success-citations.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.GoogleAI);
@@ -1503,14 +1614,16 @@ async Task InternalTestGoogleAICitations() {
       AssertEq("CandidatesTokensDetails[0].TokenCount", candidatesDetails[0].TokenCount, 1667);
     }
-    async Task InternalTestGenerateImagesBase64() {
+    async Task InternalTestGenerateImagesBase64()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-success-generate-images-base64.json");
       var response = ImagenGenerationResponse.FromJson(json);
       AssertEq("FilteredReason", response.FilteredReason, null);
       AssertEq("Image Count", response.Images.Count, 4);
-      for (int i = 0; i < response.Images.Count; i++) {
+      for (int i = 0; i < response.Images.Count; i++)
+      {
         var image = response.Images[i];
         AssertEq($"Image {i} MimeType", image.MimeType, "image/png");
         Assert($"Image {i} Length: {image.Data.Length}", image.Data.Length > 0);
@@ -1520,7 +1633,8 @@ async Task InternalTestGenerateImagesBase64() {
       }
     }
-    async Task InternalTestGenerateImagesAllFiltered() {
+    async Task InternalTestGenerateImagesAllFiltered()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-failure-generate-images-all-filtered.json");
       var response = ImagenGenerationResponse.FromJson(json);
@@ -1532,7 +1646,8 @@ async Task InternalTestGenerateImagesAllFiltered() {
       AssertEq("Image Count", response.Images.Count, 0);
     }
-    async Task InternalTestGenerateImagesBase64SomeFiltered() {
+    async Task InternalTestGenerateImagesBase64SomeFiltered()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-failure-generate-images-base64-some-filtered.json");
       var response = ImagenGenerationResponse.FromJson(json);
@@ -1542,7 +1657,8 @@ async Task InternalTestGenerateImagesBase64SomeFiltered() {
           "If you think this was an error, send feedback.");
       AssertEq("Image Count", response.Images.Count, 2);
-      for (int i = 0; i < response.Images.Count; i++) {
+      for (int i = 0; i < response.Images.Count; i++)
+      {
         var image = response.Images[i];
         AssertEq($"Image {i} MimeType", image.MimeType, "image/png");
         Assert($"Image {i} Length: {image.Data.Length}", image.Data.Length > 0);
@@ -1552,7 +1668,8 @@ async Task InternalTestGenerateImagesBase64SomeFiltered() {
       }
     }
-    async Task InternalTestThoughtSummary() {
+    async Task InternalTestThoughtSummary()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-success-thinking-reply-thought-summary.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);
@@ -1567,7 +1684,8 @@ async Task InternalTestThoughtSummary() {
       ValidateUsageMetadata(response.UsageMetadata, 13, 2, 39, 0, 54);
     }
-    async Task InternalTestCodeExecution() {
+    async Task InternalTestCodeExecution()
+    {
       Dictionary<string, object> json = await GetVertexJsonTestData("unary-success-code-execution.json");
       GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);
@@ -1592,7 +1710,8 @@ async Task InternalTestCodeExecution() {
       AssertEq("ToolUsePromptTokensDetails[0].TokenCount", details[0].TokenCount, 181);
     }
-    async Task InternalTestUrlContextMixedValidity() {
+    async Task InternalTestUrlContextMixedValidity()
+    {
       Dictionary<string, object> json = await
GetVertexJsonTestData("unary-success-url-context-mixed-validity.json"); GenerateContentResponse response = GenerateContentResponse.FromJson(json, FirebaseAI.Backend.InternalProvider.VertexAI);