diff --git a/firebaseai/src/Candidate.cs b/firebaseai/src/Candidate.cs
index c3053c95..9565291f 100644
--- a/firebaseai/src/Candidate.cs
+++ b/firebaseai/src/Candidate.cs
@@ -17,140 +17,148 @@
using System.Collections.Generic;
using Firebase.AI.Internal;
-namespace Firebase.AI {
+namespace Firebase.AI
+{
+ ///
+ /// Represents the reason why the model stopped generating content.
+ ///
+ public enum FinishReason
+ {
+ ///
+ /// A new and not yet supported value.
+ ///
+ Unknown = 0,
+ ///
+ /// Natural stop point of the model or provided stop sequence.
+ ///
+ Stop,
+ ///
+ /// The maximum number of tokens as specified in the request was reached.
+ ///
+ MaxTokens,
+ ///
+ /// The token generation was stopped because the response was flagged for safety reasons.
+ ///
+ Safety,
+ ///
+ /// The token generation was stopped because the response was flagged for unauthorized citations.
+ ///
+ Recitation,
+ ///
+ /// All other reasons that stopped token generation.
+ ///
+ Other,
+ ///
+ /// Token generation was stopped because the response contained forbidden terms.
+ ///
+ Blocklist,
+ ///
+ /// Token generation was stopped because the response contained potentially prohibited content.
+ ///
+ ProhibitedContent,
+ ///
+ /// Token generation was stopped because of Sensitive Personally Identifiable Information (SPII).
+ ///
+ SPII,
+ ///
+ /// Token generation was stopped because the function call generated by the model was invalid.
+ ///
+ MalformedFunctionCall,
+ }
-///
-/// Represents the reason why the model stopped generating content.
-///
-public enum FinishReason {
- ///
- /// A new and not yet supported value.
- ///
- Unknown = 0,
- ///
- /// Natural stop point of the model or provided stop sequence.
- ///
- Stop,
- ///
- /// The maximum number of tokens as specified in the request was reached.
- ///
- MaxTokens,
- ///
- /// The token generation was stopped because the response was flagged for safety reasons.
- ///
- Safety,
- ///
- /// The token generation was stopped because the response was flagged for unauthorized citations.
- ///
- Recitation,
///
- /// All other reasons that stopped token generation.
+ /// A struct representing a possible reply to a content generation prompt.
+ /// Each content generation prompt may produce multiple candidate responses.
///
- Other,
- ///
- /// Token generation was stopped because the response contained forbidden terms.
- ///
- Blocklist,
- ///
- /// Token generation was stopped because the response contained potentially prohibited content.
- ///
- ProhibitedContent,
- ///
- /// Token generation was stopped because of Sensitive Personally Identifiable Information (SPII).
- ///
- SPII,
- ///
- /// Token generation was stopped because the function call generated by the model was invalid.
- ///
- MalformedFunctionCall,
-}
+ public readonly struct Candidate
+ {
+ private readonly IReadOnlyList<SafetyRating> _safetyRatings;
-///
-/// A struct representing a possible reply to a content generation prompt.
-/// Each content generation prompt may produce multiple candidate responses.
-///
-public readonly struct Candidate {
- private readonly IReadOnlyList _safetyRatings;
+ ///
+ /// The response’s content.
+ ///
+ public ModelContent Content { get; }
- ///
- /// The response’s content.
- ///
- public ModelContent Content { get; }
-
- ///
- /// The safety rating of the response content.
- ///
- public IReadOnlyList SafetyRatings {
- get {
- return _safetyRatings ?? new List();
+ ///
+ /// The safety rating of the response content.
+ ///
+ public IReadOnlyList<SafetyRating> SafetyRatings
+ {
+ get
+ {
+ return _safetyRatings ?? new List<SafetyRating>();
+ }
}
- }
- ///
- /// The reason the model stopped generating content, if it exists;
- /// for example, if the model generated a predefined stop sequence.
- ///
- public FinishReason? FinishReason { get; }
+ ///
+ /// The reason the model stopped generating content, if it exists;
+ /// for example, if the model generated a predefined stop sequence.
+ ///
+ public FinishReason? FinishReason { get; }
- ///
- /// Cited works in the model’s response content, if it exists.
- ///
- public CitationMetadata? CitationMetadata { get; }
+ ///
+ /// Cited works in the model’s response content, if it exists.
+ ///
+ public CitationMetadata? CitationMetadata { get; }
- ///
- /// Grounding metadata for the response, if any.
- ///
- public GroundingMetadata? GroundingMetadata { get; }
-
- ///
- /// Metadata related to the `URLContext` tool.
- ///
- public UrlContextMetadata? UrlContextMetadata { get; }
+ ///
+ /// Grounding metadata for the response, if any.
+ ///
+ public GroundingMetadata? GroundingMetadata { get; }
- // Hidden constructor, users don't need to make this.
- private Candidate(ModelContent content, List safetyRatings,
- FinishReason? finishReason, CitationMetadata? citationMetadata,
- GroundingMetadata? groundingMetadata, UrlContextMetadata? urlContextMetadata) {
- Content = content;
- _safetyRatings = safetyRatings ?? new List();
- FinishReason = finishReason;
- CitationMetadata = citationMetadata;
- GroundingMetadata = groundingMetadata;
- UrlContextMetadata = urlContextMetadata;
- }
+ ///
+ /// Metadata related to the `URLContext` tool.
+ ///
+ public UrlContextMetadata? UrlContextMetadata { get; }
- private static FinishReason ParseFinishReason(string str) {
- return str switch {
- "STOP" => Firebase.AI.FinishReason.Stop,
- "MAX_TOKENS" => Firebase.AI.FinishReason.MaxTokens,
- "SAFETY" => Firebase.AI.FinishReason.Safety,
- "RECITATION" => Firebase.AI.FinishReason.Recitation,
- "OTHER" => Firebase.AI.FinishReason.Other,
- "BLOCKLIST" => Firebase.AI.FinishReason.Blocklist,
- "PROHIBITED_CONTENT" => Firebase.AI.FinishReason.ProhibitedContent,
- "SPII" => Firebase.AI.FinishReason.SPII,
- "MALFORMED_FUNCTION_CALL" => Firebase.AI.FinishReason.MalformedFunctionCall,
- _ => Firebase.AI.FinishReason.Unknown,
- };
- }
+ // Hidden constructor, users don't need to make this.
+ private Candidate(ModelContent content, List<SafetyRating> safetyRatings,
+ FinishReason? finishReason, CitationMetadata? citationMetadata,
+ GroundingMetadata? groundingMetadata, UrlContextMetadata? urlContextMetadata)
+ {
+ Content = content;
+ _safetyRatings = safetyRatings ?? new List<SafetyRating>();
+ FinishReason = finishReason;
+ CitationMetadata = citationMetadata;
+ GroundingMetadata = groundingMetadata;
+ UrlContextMetadata = urlContextMetadata;
+ }
- ///
- /// Intended for internal use only.
- /// This method is used for deserializing JSON responses and should not be called directly.
- ///
- internal static Candidate FromJson(Dictionary jsonDict,
- FirebaseAI.Backend.InternalProvider backend) {
- return new Candidate(
- jsonDict.ParseObject("content", ModelContent.FromJson, defaultValue: new ModelContent("model")),
- jsonDict.ParseObjectList("safetyRatings", SafetyRating.FromJson),
- jsonDict.ParseNullableEnum("finishReason", ParseFinishReason),
- jsonDict.ParseNullableObject("citationMetadata",
- (d) => Firebase.AI.CitationMetadata.FromJson(d, backend)),
- jsonDict.ParseNullableObject("groundingMetadata",
- Firebase.AI.GroundingMetadata.FromJson),
- jsonDict.ParseNullableObject("urlContextMetadata",
- Firebase.AI.UrlContextMetadata.FromJson));
+ private static FinishReason ParseFinishReason(string str)
+ {
+ return str switch
+ {
+ "STOP" => Firebase.AI.FinishReason.Stop,
+ "MAX_TOKENS" => Firebase.AI.FinishReason.MaxTokens,
+ "SAFETY" => Firebase.AI.FinishReason.Safety,
+ "RECITATION" => Firebase.AI.FinishReason.Recitation,
+ "OTHER" => Firebase.AI.FinishReason.Other,
+ "BLOCKLIST" => Firebase.AI.FinishReason.Blocklist,
+ "PROHIBITED_CONTENT" => Firebase.AI.FinishReason.ProhibitedContent,
+ "SPII" => Firebase.AI.FinishReason.SPII,
+ "MALFORMED_FUNCTION_CALL" => Firebase.AI.FinishReason.MalformedFunctionCall,
+ _ => Firebase.AI.FinishReason.Unknown,
+ };
+ }
+
+ ///
+ /// Intended for internal use only.
+ /// This method is used for deserializing JSON responses and should not be called directly.
+ ///
+ internal static Candidate FromJson(Dictionary<string, object> jsonDict,
+ FirebaseAI.Backend.InternalProvider backend)
+ {
+ return new Candidate(
+ jsonDict.ParseObject("content", ModelContent.FromJson, defaultValue: new ModelContent("model")),
+ jsonDict.ParseObjectList("safetyRatings", SafetyRating.FromJson),
+ jsonDict.ParseNullableEnum("finishReason", ParseFinishReason),
+ jsonDict.ParseNullableObject("citationMetadata",
+ (d) => Firebase.AI.CitationMetadata.FromJson(d, backend)),
+ jsonDict.ParseNullableObject("groundingMetadata",
+ Firebase.AI.GroundingMetadata.FromJson),
+ jsonDict.ParseNullableObject("urlContextMetadata",
+ Firebase.AI.UrlContextMetadata.FromJson));
+ }
}
-}
}
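
For reference, a minimal C# sketch of reading the Candidate surface introduced above. It assumes the surrounding SDK pieces (GenerativeModel.GenerateContentAsync and GenerateContentResponse.Candidates, which appear only indirectly in this diff), an example model name of "gemini-2.0-flash", and Unity's UnityEngine.Debug for logging.

using System.Linq;
using System.Threading.Tasks;
using Firebase.AI;

public static class CandidateExample
{
  // Generate a response and inspect the first candidate's finish reason and safety ratings.
  public static async Task LogFirstCandidateAsync()
  {
    var model = FirebaseAI.DefaultInstance.GetGenerativeModel("gemini-2.0-flash");
    GenerateContentResponse response = await model.GenerateContentAsync("Hello!");

    Candidate candidate = response.Candidates.First();
    if (candidate.FinishReason == FinishReason.Safety)
    {
      UnityEngine.Debug.Log("Generation stopped because the response was flagged for safety.");
    }
    foreach (SafetyRating rating in candidate.SafetyRatings)
    {
      UnityEngine.Debug.Log($"Safety rating: {rating}");
    }
  }
}
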
diff --git a/firebaseai/src/Chat.cs b/firebaseai/src/Chat.cs
index d8df8f75..a437a371 100644
--- a/firebaseai/src/Chat.cs
+++ b/firebaseai/src/Chat.cs
@@ -21,172 +21,192 @@
using System.Threading.Tasks;
using Firebase.AI.Internal;
-namespace Firebase.AI {
-
-///
-/// An object that represents a back-and-forth chat with a model, capturing the history and saving
-/// the context in memory between each message sent.
-///
-public class Chat {
- private readonly GenerativeModel generativeModel;
- private readonly List chatHistory;
-
+namespace Firebase.AI
+{
///
- /// The previous content from the chat that has been successfully sent and received from the
- /// model. This will be provided to the model for each message sent as context for the discussion.
+ /// An object that represents a back-and-forth chat with a model, capturing the history and saving
+ /// the context in memory between each message sent.
///
- public IReadOnlyList History => chatHistory;
-
- // Note: No public constructor, get one through GenerativeModel.StartChat
- private Chat(GenerativeModel model, IEnumerable initialHistory) {
- generativeModel = model;
-
- if (initialHistory != null) {
- chatHistory = new List(initialHistory);
- } else {
- chatHistory = new List();
+ public class Chat
+ {
+ private readonly GenerativeModel generativeModel;
+ private readonly List<ModelContent> chatHistory;
+
+ ///
+ /// The previous content from the chat that has been successfully sent and received from the
+ /// model. This will be provided to the model for each message sent as context for the discussion.
+ ///
+ public IReadOnlyList<ModelContent> History => chatHistory;
+
+ // Note: No public constructor, get one through GenerativeModel.StartChat
+ private Chat(GenerativeModel model, IEnumerable<ModelContent> initialHistory)
+ {
+ generativeModel = model;
+
+ if (initialHistory != null)
+ {
+ chatHistory = new List<ModelContent>(initialHistory);
+ }
+ else
+ {
+ chatHistory = new List<ModelContent>();
+ }
}
- }
-
- ///
- /// Intended for internal use only.
- /// Use `GenerativeModel.StartChat` instead to ensure proper initialization and configuration of the `Chat`.
- ///
- internal static Chat InternalCreateChat(GenerativeModel model, IEnumerable initialHistory) {
- return new Chat(model, initialHistory);
- }
-
- ///
- /// Sends a message using the existing history of this chat as context. If successful, the message
- /// and response will be added to the history. If unsuccessful, history will remain unchanged.
- ///
- /// The input given to the model as a prompt.
- /// An optional token to cancel the operation.
- /// The model's response if no error occurred.
- /// Thrown when an error occurs during content generation.
- public Task SendMessageAsync(
- ModelContent content, CancellationToken cancellationToken = default) {
- return SendMessageAsync(new[] { content }, cancellationToken);
- }
- ///
- /// Sends a message using the existing history of this chat as context. If successful, the message
- /// and response will be added to the history. If unsuccessful, history will remain unchanged.
- ///
- /// The text given to the model as a prompt.
- /// An optional token to cancel the operation.
- /// The model's response if no error occurred.
- /// Thrown when an error occurs during content generation.
- public Task SendMessageAsync(
- string text, CancellationToken cancellationToken = default) {
- return SendMessageAsync(new ModelContent[] { ModelContent.Text(text) }, cancellationToken);
- }
- ///
- /// Sends a message using the existing history of this chat as context. If successful, the message
- /// and response will be added to the history. If unsuccessful, history will remain unchanged.
- ///
- /// The input given to the model as a prompt.
- /// An optional token to cancel the operation.
- /// The model's response if no error occurred.
- /// Thrown when an error occurs during content generation.
- public Task SendMessageAsync(
- IEnumerable content, CancellationToken cancellationToken = default) {
- return SendMessageAsyncInternal(content, cancellationToken);
- }
- ///
- /// Sends a message using the existing history of this chat as context. If successful, the message
- /// and response will be added to the history. If unsuccessful, history will remain unchanged.
- ///
- /// The input given to the model as a prompt.
- /// An optional token to cancel the operation.
- /// A stream of generated content responses from the model.
- /// Thrown when an error occurs during content generation.
- public IAsyncEnumerable SendMessageStreamAsync(
- ModelContent content, CancellationToken cancellationToken = default) {
- return SendMessageStreamAsync(new[] { content }, cancellationToken);
- }
- ///
- /// Sends a message using the existing history of this chat as context. If successful, the message
- /// and response will be added to the history. If unsuccessful, history will remain unchanged.
- ///
- /// The text given to the model as a prompt.
- /// An optional token to cancel the operation.
- /// A stream of generated content responses from the model.
- /// Thrown when an error occurs during content generation.
- public IAsyncEnumerable SendMessageStreamAsync(
- string text, CancellationToken cancellationToken = default) {
- return SendMessageStreamAsync(new ModelContent[] { ModelContent.Text(text) }, cancellationToken);
- }
- ///
- /// Sends a message using the existing history of this chat as context. If successful, the message
- /// and response will be added to the history. If unsuccessful, history will remain unchanged.
- ///
- /// The input given to the model as a prompt.
- /// An optional token to cancel the operation.
- /// A stream of generated content responses from the model.
- /// Thrown when an error occurs during content generation.
- public IAsyncEnumerable SendMessageStreamAsync(
- IEnumerable content, CancellationToken cancellationToken = default) {
- return SendMessageStreamAsyncInternal(content, cancellationToken);
- }
+ ///
+ /// Intended for internal use only.
+ /// Use `GenerativeModel.StartChat` instead to ensure proper initialization and configuration of the `Chat`.
+ ///
+ internal static Chat InternalCreateChat(GenerativeModel model, IEnumerable<ModelContent> initialHistory)
+ {
+ return new Chat(model, initialHistory);
+ }
- private async Task SendMessageAsyncInternal(
- IEnumerable requestContent, CancellationToken cancellationToken = default) {
- // Make sure that the requests are set to to role "user".
- List fixedRequests = requestContent.Select(FirebaseAIExtensions.ConvertToUser).ToList();
- // Set up the context to send in the request
- List fullRequest = new(chatHistory);
- fullRequest.AddRange(fixedRequests);
-
- // Note: GenerateContentAsync can throw exceptions if there was a problem, but
- // we allow it to just be passed back to the user.
- GenerateContentResponse response = await generativeModel.GenerateContentAsync(fullRequest, cancellationToken);
-
- // Only after getting a valid response, add both to the history for later.
- // But either way pass the response along to the user.
- if (response.Candidates.Any()) {
- ModelContent responseContent = response.Candidates.First().Content;
-
- chatHistory.AddRange(fixedRequests);
- chatHistory.Add(responseContent.ConvertToModel());
+ ///
+ /// Sends a message using the existing history of this chat as context. If successful, the message
+ /// and response will be added to the history. If unsuccessful, history will remain unchanged.
+ ///
+ /// The input given to the model as a prompt.
+ /// An optional token to cancel the operation.
+ /// The model's response if no error occurred.
+ /// Thrown when an error occurs during content generation.
+ public Task<GenerateContentResponse> SendMessageAsync(
+ ModelContent content, CancellationToken cancellationToken = default)
+ {
+ return SendMessageAsync(new[] { content }, cancellationToken);
+ }
+ ///
+ /// Sends a message using the existing history of this chat as context. If successful, the message
+ /// and response will be added to the history. If unsuccessful, history will remain unchanged.
+ ///
+ /// The text given to the model as a prompt.
+ /// An optional token to cancel the operation.
+ /// The model's response if no error occurred.
+ /// Thrown when an error occurs during content generation.
+ public Task<GenerateContentResponse> SendMessageAsync(
+ string text, CancellationToken cancellationToken = default)
+ {
+ return SendMessageAsync(new ModelContent[] { ModelContent.Text(text) }, cancellationToken);
+ }
+ ///
+ /// Sends a message using the existing history of this chat as context. If successful, the message
+ /// and response will be added to the history. If unsuccessful, history will remain unchanged.
+ ///
+ /// The input given to the model as a prompt.
+ /// An optional token to cancel the operation.
+ /// The model's response if no error occurred.
+ /// Thrown when an error occurs during content generation.
+ public Task<GenerateContentResponse> SendMessageAsync(
+ IEnumerable<ModelContent> content, CancellationToken cancellationToken = default)
+ {
+ return SendMessageAsyncInternal(content, cancellationToken);
}
- return response;
- }
+ ///
+ /// Sends a message using the existing history of this chat as context. If successful, the message
+ /// and response will be added to the history. If unsuccessful, history will remain unchanged.
+ ///
+ /// The input given to the model as a prompt.
+ /// An optional token to cancel the operation.
+ /// A stream of generated content responses from the model.
+ /// Thrown when an error occurs during content generation.
+ public IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsync(
+ ModelContent content, CancellationToken cancellationToken = default)
+ {
+ return SendMessageStreamAsync(new[] { content }, cancellationToken);
+ }
+ ///
+ /// Sends a message using the existing history of this chat as context. If successful, the message
+ /// and response will be added to the history. If unsuccessful, history will remain unchanged.
+ ///
+ /// The text given to the model as a prompt.
+ /// An optional token to cancel the operation.
+ /// A stream of generated content responses from the model.
+ /// Thrown when an error occurs during content generation.
+ public IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsync(
+ string text, CancellationToken cancellationToken = default)
+ {
+ return SendMessageStreamAsync(new ModelContent[] { ModelContent.Text(text) }, cancellationToken);
+ }
+ ///
+ /// Sends a message using the existing history of this chat as context. If successful, the message
+ /// and response will be added to the history. If unsuccessful, history will remain unchanged.
+ ///
+ /// The input given to the model as a prompt.
+ /// An optional token to cancel the operation.
+ /// A stream of generated content responses from the model.
+ /// Thrown when an error occurs during content generation.
+ public IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsync(
+ IEnumerable<ModelContent> content, CancellationToken cancellationToken = default)
+ {
+ return SendMessageStreamAsyncInternal(content, cancellationToken);
+ }
- private async IAsyncEnumerable SendMessageStreamAsyncInternal(
- IEnumerable requestContent,
- [EnumeratorCancellation] CancellationToken cancellationToken = default) {
- // Make sure that the requests are set to to role "user".
- List fixedRequests = requestContent.Select(FirebaseAIExtensions.ConvertToUser).ToList();
- // Set up the context to send in the request
- List fullRequest = new(chatHistory);
- fullRequest.AddRange(fixedRequests);
-
- List responseContents = new();
- bool saveHistory = true;
- // Note: GenerateContentStreamAsync can throw exceptions if there was a problem, but
- // we allow it to just be passed back to the user.
- await foreach (GenerateContentResponse response in
- generativeModel.GenerateContentStreamAsync(fullRequest, cancellationToken)) {
- // If the response had a problem, we still want to pass it along to the user for context,
- // but we don't want to save the history anymore.
- if (response.Candidates.Any()) {
+ private async Task<GenerateContentResponse> SendMessageAsyncInternal(
+ IEnumerable<ModelContent> requestContent, CancellationToken cancellationToken = default)
+ {
+ // Make sure that the requests are set to the role "user".
+ List<ModelContent> fixedRequests = requestContent.Select(FirebaseAIExtensions.ConvertToUser).ToList();
+ // Set up the context to send in the request
+ List<ModelContent> fullRequest = new(chatHistory);
+ fullRequest.AddRange(fixedRequests);
+
+ // Note: GenerateContentAsync can throw exceptions if there was a problem, but
+ // we allow it to just be passed back to the user.
+ GenerateContentResponse response = await generativeModel.GenerateContentAsync(fullRequest, cancellationToken);
+
+ // Only after getting a valid response, add both to the history for later.
+ // But either way pass the response along to the user.
+ if (response.Candidates.Any())
+ {
ModelContent responseContent = response.Candidates.First().Content;
- responseContents.Add(responseContent.ConvertToModel());
- } else {
- saveHistory = false;
+
+ chatHistory.AddRange(fixedRequests);
+ chatHistory.Add(responseContent.ConvertToModel());
}
- yield return response;
+ return response;
}
- // After getting all the responses, and they were all valid, add everything to the history
- if (saveHistory) {
- chatHistory.AddRange(fixedRequests);
- chatHistory.AddRange(responseContents);
+ private async IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsyncInternal(
+ IEnumerable<ModelContent> requestContent,
+ [EnumeratorCancellation] CancellationToken cancellationToken = default)
+ {
+ // Make sure that the requests are set to the role "user".
+ List<ModelContent> fixedRequests = requestContent.Select(FirebaseAIExtensions.ConvertToUser).ToList();
+ // Set up the context to send in the request
+ List<ModelContent> fullRequest = new(chatHistory);
+ fullRequest.AddRange(fixedRequests);
+
+ List<ModelContent> responseContents = new();
+ bool saveHistory = true;
+ // Note: GenerateContentStreamAsync can throw exceptions if there was a problem, but
+ // we allow it to just be passed back to the user.
+ await foreach (GenerateContentResponse response in
+ generativeModel.GenerateContentStreamAsync(fullRequest, cancellationToken))
+ {
+ // If the response had a problem, we still want to pass it along to the user for context,
+ // but we don't want to save the history anymore.
+ if (response.Candidates.Any())
+ {
+ ModelContent responseContent = response.Candidates.First().Content;
+ responseContents.Add(responseContent.ConvertToModel());
+ }
+ else
+ {
+ saveHistory = false;
+ }
+
+ yield return response;
+ }
+
+ // After getting all the responses, and they were all valid, add everything to the history
+ if (saveHistory)
+ {
+ chatHistory.AddRange(fixedRequests);
+ chatHistory.AddRange(responseContents);
+ }
}
}
-}
}
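
A short sketch of the Chat flow above: the model is obtained from FirebaseAI, a Chat is created via GenerativeModel.StartChat (referenced in the comments above but defined outside this diff), and each successful exchange is appended to History. The model name and the GenerateContentResponse.Text helper are illustrative assumptions.

using System.Threading.Tasks;
using Firebase.AI;

public static class ChatExample
{
  // Two turns in the same Chat; the second prompt is answered with the first
  // exchange already present in History.
  public static async Task RunChatAsync()
  {
    var model = FirebaseAI.DefaultInstance.GetGenerativeModel("gemini-2.0-flash");
    Chat chat = model.StartChat();

    await chat.SendMessageAsync("What is Firebase?");
    GenerateContentResponse reply = await chat.SendMessageAsync("Summarize that in one sentence.");

    // History now holds both user messages and both model replies.
    UnityEngine.Debug.Log($"History length: {chat.History.Count}, reply: {reply.Text}");
  }
}
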
diff --git a/firebaseai/src/Citation.cs b/firebaseai/src/Citation.cs
index c98ab2a4..478978f0 100644
--- a/firebaseai/src/Citation.cs
+++ b/firebaseai/src/Citation.cs
@@ -18,114 +18,125 @@
using System.Collections.Generic;
using Firebase.AI.Internal;
-namespace Firebase.AI {
-
-///
-/// A collection of source attributions for a piece of content.
-///
-public readonly struct CitationMetadata {
- private readonly IReadOnlyList _citations;
-
+namespace Firebase.AI
+{
///
- /// A list of individual cited sources and the parts of the content to which they apply.
+ /// A collection of source attributions for a piece of content.
///
- public IReadOnlyList Citations {
- get {
- return _citations ?? new List();
+ public readonly struct CitationMetadata
+ {
+ private readonly IReadOnlyList<Citation> _citations;
+
+ ///
+ /// A list of individual cited sources and the parts of the content to which they apply.
+ ///
+ public IReadOnlyList<Citation> Citations
+ {
+ get
+ {
+ return _citations ?? new List<Citation>();
+ }
}
- }
- // Hidden constructor, users don't need to make this.
- private CitationMetadata(List citations) {
- _citations = citations;
- }
+ // Hidden constructor, users don't need to make this.
+ private CitationMetadata(List<Citation> citations)
+ {
+ _citations = citations;
+ }
- ///
- /// Intended for internal use only.
- /// This method is used for deserializing JSON responses and should not be called directly.
- ///
- internal static CitationMetadata FromJson(Dictionary jsonDict,
- FirebaseAI.Backend.InternalProvider backend) {
- string citationKey = backend switch {
- FirebaseAI.Backend.InternalProvider.GoogleAI => "citationSources",
- FirebaseAI.Backend.InternalProvider.VertexAI => "citations",
- _ => throw new ArgumentOutOfRangeException(nameof(backend), backend,
- "Unsupported or unhandled backend provider encountered.")
- };
- return new CitationMetadata(
- jsonDict.ParseObjectList(citationKey, Citation.FromJson));
+ ///
+ /// Intended for internal use only.
+ /// This method is used for deserializing JSON responses and should not be called directly.
+ ///
+ internal static CitationMetadata FromJson(Dictionary<string, object> jsonDict,
+ FirebaseAI.Backend.InternalProvider backend)
+ {
+ string citationKey = backend switch
+ {
+ FirebaseAI.Backend.InternalProvider.GoogleAI => "citationSources",
+ FirebaseAI.Backend.InternalProvider.VertexAI => "citations",
+ _ => throw new ArgumentOutOfRangeException(nameof(backend), backend,
+ "Unsupported or unhandled backend provider encountered.")
+ };
+ return new CitationMetadata(
+ jsonDict.ParseObjectList(citationKey, Citation.FromJson));
+ }
}
-}
-///
-/// A struct describing a source attribution.
-///
-public readonly struct Citation {
- ///
- /// The inclusive beginning of a sequence in a model response that derives from a cited source.
- ///
- public int StartIndex { get; }
- ///
- /// The exclusive end of a sequence in a model response that derives from a cited source.
- ///
- public int EndIndex { get; }
///
- /// A link to the cited source, if available.
+ /// A struct describing a source attribution.
///
- public System.Uri Uri { get; }
- ///
- /// The title of the cited source, if available.
- ///
- public string Title { get; }
- ///
- /// The license the cited source work is distributed under, if specified.
- ///
- public string License { get; }
- ///
- /// The publication date of the cited source, if available.
- ///
- public System.DateTime? PublicationDate { get; }
-
- // Hidden constructor, users don't need to make this.
- private Citation(int startIndex, int endIndex, Uri uri, string title,
- string license, DateTime? publicationDate) {
- StartIndex = startIndex;
- EndIndex = endIndex;
- Uri = uri;
- Title = title;
- License = license;
- PublicationDate = publicationDate;
- }
+ public readonly struct Citation
+ {
+ ///
+ /// The inclusive beginning of a sequence in a model response that derives from a cited source.
+ ///
+ public int StartIndex { get; }
+ ///
+ /// The exclusive end of a sequence in a model response that derives from a cited source.
+ ///
+ public int EndIndex { get; }
+ ///
+ /// A link to the cited source, if available.
+ ///
+ public System.Uri Uri { get; }
+ ///
+ /// The title of the cited source, if available.
+ ///
+ public string Title { get; }
+ ///
+ /// The license the cited source work is distributed under, if specified.
+ ///
+ public string License { get; }
+ ///
+ /// The publication date of the cited source, if available.
+ ///
+ public System.DateTime? PublicationDate { get; }
- ///
- /// Intended for internal use only.
- /// This method is used for deserializing JSON responses and should not be called directly.
- ///
- internal static Citation FromJson(Dictionary jsonDict) {
- // If there is a Uri, need to convert it.
- Uri uri = null;
- if (jsonDict.TryParseValue("uri", out string uriString)) {
- uri = new Uri(uriString);
+ // Hidden constructor, users don't need to make this.
+ private Citation(int startIndex, int endIndex, Uri uri, string title,
+ string license, DateTime? publicationDate)
+ {
+ StartIndex = startIndex;
+ EndIndex = endIndex;
+ Uri = uri;
+ Title = title;
+ License = license;
+ PublicationDate = publicationDate;
}
- // If there is a publication date, we need to convert it.
- DateTime? pubDate = null;
- if (jsonDict.TryParseValue("publicationDate", out Dictionary dateDict)) {
- // Make sure that if any key is missing, it has a default value that will work with DateTime.
- pubDate = new DateTime(
- dateDict.ParseValue("year", defaultValue: 1),
- dateDict.ParseValue("month", defaultValue: 1),
- dateDict.ParseValue("day", defaultValue: 1));
- }
+ ///
+ /// Intended for internal use only.
+ /// This method is used for deserializing JSON responses and should not be called directly.
+ ///
+ internal static Citation FromJson(Dictionary<string, object> jsonDict)
+ {
+ // If there is a Uri, need to convert it.
+ Uri uri = null;
+ if (jsonDict.TryParseValue("uri", out string uriString))
+ {
+ uri = new Uri(uriString);
+ }
- return new Citation(
- jsonDict.ParseValue("startIndex"),
- jsonDict.ParseValue("endIndex"),
- uri,
- jsonDict.ParseValue("title"),
- jsonDict.ParseValue("license"),
- pubDate);
+ // If there is a publication date, we need to convert it.
+ DateTime? pubDate = null;
+ if (jsonDict.TryParseValue("publicationDate", out Dictionary dateDict))
+ {
+ // Make sure that if any key is missing, it has a default value that will work with DateTime.
+ pubDate = new DateTime(
+ dateDict.ParseValue("year", defaultValue: 1),
+ dateDict.ParseValue("month", defaultValue: 1),
+ dateDict.ParseValue("day", defaultValue: 1));
+ }
+
+ return new Citation(
+ jsonDict.ParseValue("startIndex"),
+ jsonDict.ParseValue("endIndex"),
+ uri,
+ jsonDict.ParseValue("title"),
+ jsonDict.ParseValue("license"),
+ pubDate);
+ }
}
-}
}
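
A small sketch showing how the CitationMetadata and Citation structs above are typically read from a candidate; the model name and the GenerateContentAsync call are illustrative assumptions drawn from the rest of the SDK.

using System.Linq;
using System.Threading.Tasks;
using Firebase.AI;

public static class CitationExample
{
  // Log any cited sources attached to the first candidate of a response.
  public static async Task LogCitationsAsync()
  {
    var model = FirebaseAI.DefaultInstance.GetGenerativeModel("gemini-2.0-flash");
    GenerateContentResponse response =
        await model.GenerateContentAsync("Give a short history of the Eiffel Tower.");

    CitationMetadata? metadata = response.Candidates.First().CitationMetadata;
    if (metadata.HasValue)
    {
      foreach (Citation citation in metadata.Value.Citations)
      {
        UnityEngine.Debug.Log(
            $"[{citation.StartIndex}, {citation.EndIndex}) {citation.Title} {citation.Uri}");
      }
    }
  }
}
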
diff --git a/firebaseai/src/CountTokensResponse.cs b/firebaseai/src/CountTokensResponse.cs
index 49314a4c..2a765559 100644
--- a/firebaseai/src/CountTokensResponse.cs
+++ b/firebaseai/src/CountTokensResponse.cs
@@ -19,69 +19,75 @@
using Google.MiniJSON;
using Firebase.AI.Internal;
-namespace Firebase.AI {
-
-///
-/// The model's response to a count tokens request.
-///
-public readonly struct CountTokensResponse {
- ///
- /// The total number of tokens in the input given to the model as a prompt.
- ///
- public int TotalTokens { get; }
+namespace Firebase.AI
+{
///
- /// The total number of billable characters in the text input given to the model as a prompt.
- ///
- /// > Important: This does not include billable image, video or other non-text input. See
- /// [Firebase AI pricing](https://firebase.google.com/docs/vertex-ai/pricing) for details.
+ /// The model's response to a count tokens request.
///
- ///
- /// Use TotalTokens instead; Gemini 2.0 series models and newer are always billed by token count.
- ///
- /// @deprecated Use TotalTokens instead; Gemini 2.0 series models and newer are always
- /// billed by token count.
- [Obsolete("Use TotalTokens instead; Gemini 2.0 series models and newer are always billed by token count.")]
- public int? TotalBillableCharacters { get; }
+ public readonly struct CountTokensResponse
+ {
+ ///
+ /// The total number of tokens in the input given to the model as a prompt.
+ ///
+ public int TotalTokens { get; }
+ ///
+ /// The total number of billable characters in the text input given to the model as a prompt.
+ ///
+ /// > Important: This does not include billable image, video or other non-text input. See
+ /// [Firebase AI pricing](https://firebase.google.com/docs/vertex-ai/pricing) for details.
+ ///
+ ///
+ /// Use TotalTokens instead; Gemini 2.0 series models and newer are always billed by token count.
+ ///
+ /// @deprecated Use TotalTokens instead; Gemini 2.0 series models and newer are always
+ /// billed by token count.
+ [Obsolete("Use TotalTokens instead; Gemini 2.0 series models and newer are always billed by token count.")]
+ public int? TotalBillableCharacters { get; }
- private readonly IReadOnlyList _promptTokensDetails;
- ///
- /// The breakdown, by modality, of how many tokens are consumed by the prompt.
- ///
- public IReadOnlyList PromptTokensDetails {
- get {
- return _promptTokensDetails ?? new List();
+ private readonly IReadOnlyList<ModalityTokenCount> _promptTokensDetails;
+ ///
+ /// The breakdown, by modality, of how many tokens are consumed by the prompt.
+ ///
+ public IReadOnlyList<ModalityTokenCount> PromptTokensDetails
+ {
+ get
+ {
+ return _promptTokensDetails ?? new List<ModalityTokenCount>();
+ }
}
- }
- // Hidden constructor, users don't need to make this
- private CountTokensResponse(int totalTokens,
- int? totalBillableCharacters = null,
- List promptTokensDetails = null) {
- TotalTokens = totalTokens;
+ // Hidden constructor, users don't need to make this
+ private CountTokensResponse(int totalTokens,
+ int? totalBillableCharacters = null,
+ List<ModalityTokenCount> promptTokensDetails = null)
+ {
+ TotalTokens = totalTokens;
#pragma warning disable CS0618
- TotalBillableCharacters = totalBillableCharacters;
+ TotalBillableCharacters = totalBillableCharacters;
#pragma warning restore CS0618
- _promptTokensDetails = promptTokensDetails;
- }
+ _promptTokensDetails = promptTokensDetails;
+ }
- ///
- /// Intended for internal use only.
- /// This method is used for deserializing JSON responses and should not be called directly.
- ///
- internal static CountTokensResponse FromJson(string jsonString) {
- return FromJson(Json.Deserialize(jsonString) as Dictionary);
- }
+ ///
+ /// Intended for internal use only.
+ /// This method is used for deserializing JSON responses and should not be called directly.
+ ///
+ internal static CountTokensResponse FromJson(string jsonString)
+ {
+ return FromJson(Json.Deserialize(jsonString) as Dictionary<string, object>);
+ }
- ///
- /// Intended for internal use only.
- /// This method is used for deserializing JSON responses and should not be called directly.
- ///
- internal static CountTokensResponse FromJson(Dictionary jsonDict) {
- return new CountTokensResponse(
- jsonDict.ParseValue("totalTokens"),
- jsonDict.ParseNullableValue("totalBillableCharacters"),
- jsonDict.ParseObjectList("promptTokensDetails", ModalityTokenCount.FromJson));
+ ///
+ /// Intended for internal use only.
+ /// This method is used for deserializing JSON responses and should not be called directly.
+ ///
+ internal static CountTokensResponse FromJson(Dictionary<string, object> jsonDict)
+ {
+ return new CountTokensResponse(
+ jsonDict.ParseValue("totalTokens"),
+ jsonDict.ParseNullableValue("totalBillableCharacters"),
+ jsonDict.ParseObjectList("promptTokensDetails", ModalityTokenCount.FromJson));
+ }
}
-}
}
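
A minimal sketch of consuming CountTokensResponse; it assumes GenerativeModel exposes a CountTokensAsync method (not shown in this diff) and an example model name.

using System.Threading.Tasks;
using Firebase.AI;

public static class CountTokensExample
{
  // Check a prompt's token cost before generating content.
  public static async Task LogTokenCountAsync()
  {
    var model = FirebaseAI.DefaultInstance.GetGenerativeModel("gemini-2.0-flash");
    CountTokensResponse counts = await model.CountTokensAsync("Why is the sky blue?");

    UnityEngine.Debug.Log($"Total tokens: {counts.TotalTokens}");
    foreach (ModalityTokenCount detail in counts.PromptTokensDetails)
    {
      UnityEngine.Debug.Log($"Prompt tokens by modality: {detail}");
    }
  }
}
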
diff --git a/firebaseai/src/FirebaseAI.cs b/firebaseai/src/FirebaseAI.cs
index 3196c77d..1f43195a 100644
--- a/firebaseai/src/FirebaseAI.cs
+++ b/firebaseai/src/FirebaseAI.cs
@@ -17,196 +17,214 @@
using System;
using System.Collections.Concurrent;
-namespace Firebase.AI {
-
-///
-/// The entry point for all Firebase AI SDK functionality.
-///
-public class FirebaseAI {
-
+namespace Firebase.AI
+{
///
- /// Defines which backend AI service is being used, provided to `FirebaseAI.GetInstance`.
+ /// The entry point for all Firebase AI SDK functionality.
///
- public readonly struct Backend {
- ///
- /// Intended for internal use only.
- /// Defines the possible types of backend providers.
- ///
- internal enum InternalProvider {
- GoogleAI,
- VertexAI
- }
+ public class FirebaseAI
+ {
///
- /// Intended for internal use only.
- /// The backend provider being used.
- ///
- internal InternalProvider Provider { get; }
- ///
- /// Intended for internal use only.
- /// The region identifier used by the Vertex AI backend.
+ /// Defines which backend AI service is being used, provided to `FirebaseAI.GetInstance`.
///
- internal string Location { get; }
+ public readonly struct Backend
+ {
+ ///
+ /// Intended for internal use only.
+ /// Defines the possible types of backend providers.
+ ///
+ internal enum InternalProvider
+ {
+ GoogleAI,
+ VertexAI
+ }
- private Backend(InternalProvider provider, string location = null) {
- Provider = provider;
- Location = location;
+ ///
+ /// Intended for internal use only.
+ /// The backend provider being used.
+ ///
+ internal InternalProvider Provider { get; }
+ ///
+ /// Intended for internal use only.
+ /// The region identifier used by the Vertex AI backend.
+ ///
+ internal string Location { get; }
+
+ private Backend(InternalProvider provider, string location = null)
+ {
+ Provider = provider;
+ Location = location;
+ }
+
+ ///
+ /// The Google AI backend service configuration.
+ ///
+ public static Backend GoogleAI()
+ {
+ return new Backend(InternalProvider.GoogleAI);
+ }
+
+ ///
+ /// The Vertex AI backend service configuration.
+ ///
+ /// The region identifier, defaulting to `us-central1`; see [Vertex AI
+ /// regions](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions)
+ /// for a list of supported regions.
+ public static Backend VertexAI(string location = "us-central1")
+ {
+ if (string.IsNullOrWhiteSpace(location) || location.Contains("/"))
+ {
+ throw new ArgumentException(
+ $"The location argument must be non-empty, and not contain special characters like '/'");
+ }
+
+ return new Backend(InternalProvider.VertexAI, location);
+ }
+
+ public override readonly string ToString()
+ {
+ return $"FirebaseAIBackend|{Provider}|{Location}";
+ }
}
- ///
- /// The Google AI backend service configuration.
- ///
- public static Backend GoogleAI() {
- return new Backend(InternalProvider.GoogleAI);
+ private static readonly ConcurrentDictionary<string, FirebaseAI> _instances = new();
+
+ private readonly FirebaseApp _firebaseApp;
+ private readonly Backend _backend;
+
+ private FirebaseAI(FirebaseApp firebaseApp, Backend backend)
+ {
+ _firebaseApp = firebaseApp;
+ _backend = backend;
}
///
- /// The Vertex AI backend service configuration.
+ /// Returns a `FirebaseAI` instance with the default `FirebaseApp` and GoogleAI Backend.
///
- /// The region identifier, defaulting to `us-central1`; see [Vertex AI
- /// regions](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions)
- /// for a list of supported regions.
- public static Backend VertexAI(string location = "us-central1") {
- if (string.IsNullOrWhiteSpace(location) || location.Contains("/")) {
- throw new ArgumentException(
- $"The location argument must be non-empty, and not contain special characters like '/'");
+ public static FirebaseAI DefaultInstance
+ {
+ get
+ {
+ return GetInstance();
}
-
- return new Backend(InternalProvider.VertexAI, location);
}
- public override readonly string ToString() {
- return $"FirebaseAIBackend|{Provider}|{Location}";
+ ///
+ /// Returns a `FirebaseAI` instance with the default `FirebaseApp` and the given Backend.
+ ///
+ /// The backend AI service to use.
+ /// A configured instance of `FirebaseAI`.
+ public static FirebaseAI GetInstance(Backend? backend = null)
+ {
+ return GetInstance(FirebaseApp.DefaultInstance, backend);
}
- }
-
- private static readonly ConcurrentDictionary _instances = new();
+ ///
+ /// Returns a `FirebaseAI` instance with the given `FirebaseApp` and Backend.
+ ///
+ /// The custom `FirebaseApp` used for initialization.
+ /// The backend AI service to use.
+ /// A configured instance of `FirebaseAI`.
+ public static FirebaseAI GetInstance(FirebaseApp app, Backend? backend = null)
+ {
+ if (app == null)
+ {
+ throw new ArgumentNullException(nameof(app));
+ }
- private readonly FirebaseApp _firebaseApp;
- private readonly Backend _backend;
+ Backend resolvedBackend = backend ?? Backend.GoogleAI();
- private FirebaseAI(FirebaseApp firebaseApp, Backend backend) {
- _firebaseApp = firebaseApp;
- _backend = backend;
- }
+ // FirebaseAI instances are keyed by a combination of the app name and backend.
+ string key = $"{app.Name}::{resolvedBackend}";
+ if (_instances.ContainsKey(key))
+ {
+ return _instances[key];
+ }
- ///
- /// Returns a `FirebaseAI` instance with the default `FirebaseApp` and GoogleAI Backend.
- ///
- public static FirebaseAI DefaultInstance {
- get {
- return GetInstance();
+ return _instances.GetOrAdd(key, _ => new FirebaseAI(app, resolvedBackend));
}
- }
- ///
- /// Returns a `FirebaseAI` instance with the default `FirebaseApp` and the given Backend.
- ///
- /// The backend AI service to use.
- /// A configured instance of `FirebaseAI`.
- public static FirebaseAI GetInstance(Backend? backend = null) {
- return GetInstance(FirebaseApp.DefaultInstance, backend);
- }
- ///
- /// Returns a `FirebaseAI` instance with the given `FirebaseApp` and Backend.
- ///
- /// The custom `FirebaseApp` used for initialization.
- /// The backend AI service to use.
- /// A configured instance of `FirebaseAI`.
- public static FirebaseAI GetInstance(FirebaseApp app, Backend? backend = null) {
- if (app == null) {
- throw new ArgumentNullException(nameof(app));
+ ///
+ /// Initializes a generative model with the given parameters.
+ ///
+ /// - Note: Refer to [Gemini models](https://firebase.google.com/docs/vertex-ai/gemini-models) for
+ /// guidance on choosing an appropriate model for your use case.
+ ///
+ /// The name of the model to use; see
+ /// [available model names
+ /// ](https://firebase.google.com/docs/vertex-ai/gemini-models#available-model-names) for a
+ /// list of supported model names.
+ /// The content generation parameters your model should use.
+ /// A value describing what types of harmful content your model should allow.
+ /// A list of `Tool` objects that the model may use to generate the next response.
+ /// Tool configuration for any `Tool` specified in the request.
+ /// Instructions that direct the model to behave a certain way;
+ /// currently only text content is supported.
+ /// Configuration parameters for sending requests to the backend.
+ /// The initialized `GenerativeModel` instance.
+ public GenerativeModel GetGenerativeModel(
+ string modelName,
+ GenerationConfig? generationConfig = null,
+ SafetySetting[] safetySettings = null,
+ Tool[] tools = null,
+ ToolConfig? toolConfig = null,
+ ModelContent? systemInstruction = null,
+ RequestOptions? requestOptions = null)
+ {
+ return new GenerativeModel(_firebaseApp, _backend, modelName,
+ generationConfig, safetySettings, tools,
+ toolConfig, systemInstruction, requestOptions);
}
- Backend resolvedBackend = backend ?? Backend.GoogleAI();
-
- // FirebaseAI instances are keyed by a combination of the app name and backend.
- string key = $"{app.Name}::{resolvedBackend}";
- if (_instances.ContainsKey(key)) {
- return _instances[key];
+ ///
+ /// Initializes a `LiveGenerativeModel` for real-time interaction.
+ ///
+ /// - Note: Refer to [Gemini models](https://firebase.google.com/docs/vertex-ai/gemini-models) for
+ /// guidance on choosing an appropriate model for your use case.
+ ///
+ /// The name of the model to use; see
+ /// [available model names
+ /// ](https://firebase.google.com/docs/vertex-ai/gemini-models#available-model-names) for a
+ /// list of supported model names.
+ /// The content generation parameters your model should use.
+ /// A list of `Tool` objects that the model may use to generate the next response.
+ /// Instructions that direct the model to behave a certain way.
+ /// Configuration parameters for sending requests to the backend.
+ /// The initialized `LiveGenerativeModel` instance.
+ public LiveGenerativeModel GetLiveModel(
+ string modelName,
+ LiveGenerationConfig? liveGenerationConfig = null,
+ Tool[] tools = null,
+ ModelContent? systemInstruction = null,
+ RequestOptions? requestOptions = null)
+ {
+ return new LiveGenerativeModel(_firebaseApp, _backend, modelName,
+ liveGenerationConfig, tools,
+ systemInstruction, requestOptions);
}
- return _instances.GetOrAdd(key, _ => new FirebaseAI(app, resolvedBackend));
- }
-
- ///
- /// Initializes a generative model with the given parameters.
- ///
- /// - Note: Refer to [Gemini models](https://firebase.google.com/docs/vertex-ai/gemini-models) for
- /// guidance on choosing an appropriate model for your use case.
- ///
- /// The name of the model to use; see
- /// [available model names
- /// ](https://firebase.google.com/docs/vertex-ai/gemini-models#available-model-names) for a
- /// list of supported model names.
- /// The content generation parameters your model should use.
- /// A value describing what types of harmful content your model should allow.
- /// A list of `Tool` objects that the model may use to generate the next response.
- /// Tool configuration for any `Tool` specified in the request.
- /// Instructions that direct the model to behave a certain way;
- /// currently only text content is supported.
- /// Configuration parameters for sending requests to the backend.
- /// The initialized `GenerativeModel` instance.
- public GenerativeModel GetGenerativeModel(
- string modelName,
- GenerationConfig? generationConfig = null,
- SafetySetting[] safetySettings = null,
- Tool[] tools = null,
- ToolConfig? toolConfig = null,
- ModelContent? systemInstruction = null,
- RequestOptions? requestOptions = null) {
- return new GenerativeModel(_firebaseApp, _backend, modelName,
- generationConfig, safetySettings, tools,
- toolConfig, systemInstruction, requestOptions);
- }
-
- ///
- /// Initializes a `LiveGenerativeModel` for real-time interaction.
- ///
- /// - Note: Refer to [Gemini models](https://firebase.google.com/docs/vertex-ai/gemini-models) for
- /// guidance on choosing an appropriate model for your use case.
- ///
- /// The name of the model to use; see
- /// [available model names
- /// ](https://firebase.google.com/docs/vertex-ai/gemini-models#available-model-names) for a
- /// list of supported model names.
- /// The content generation parameters your model should use.
- /// A list of `Tool` objects that the model may use to generate the next response.
- /// Instructions that direct the model to behave a certain way.
- /// Configuration parameters for sending requests to the backend.
- /// The initialized `LiveGenerativeModel` instance.
- public LiveGenerativeModel GetLiveModel(
- string modelName,
- LiveGenerationConfig? liveGenerationConfig = null,
- Tool[] tools = null,
- ModelContent? systemInstruction = null,
- RequestOptions? requestOptions = null) {
- return new LiveGenerativeModel(_firebaseApp, _backend, modelName,
- liveGenerationConfig, tools,
- systemInstruction, requestOptions);
- }
-
- ///
- /// Initializes an `ImagenModel` with the given parameters.
- ///
- /// - Important: Only Imagen 3 models (named `imagen-3.0-*`) are supported.
- ///
- /// The name of the Imagen 3 model to use, for example `"imagen-3.0-generate-002"`;
- /// see [model versions](https://firebase.google.com/docs/vertex-ai/models) for a list of
- /// supported Imagen 3 models.
- /// Configuration options for generating images with Imagen.
- /// Settings describing what types of potentially harmful content your model
- /// should allow.
- /// Configuration parameters for sending requests to the backend.
- /// The initialized `ImagenModel` instance.
- public ImagenModel GetImagenModel(
- string modelName,
- ImagenGenerationConfig? generationConfig = null,
- ImagenSafetySettings? safetySettings = null,
- RequestOptions? requestOptions = null) {
- return new ImagenModel(_firebaseApp, _backend, modelName,
- generationConfig, safetySettings, requestOptions);
+ ///
+ /// Initializes an `ImagenModel` with the given parameters.
+ ///
+ /// - Important: Only Imagen 3 models (named `imagen-3.0-*`) are supported.
+ ///
+ /// The name of the Imagen 3 model to use, for example `"imagen-3.0-generate-002"`;
+ /// see [model versions](https://firebase.google.com/docs/vertex-ai/models) for a list of
+ /// supported Imagen 3 models.
+ /// Configuration options for generating images with Imagen.
+ /// Settings describing what types of potentially harmful content your model
+ /// should allow.
+ /// Configuration parameters for sending requests to the backend.
+ /// The initialized `ImagenModel` instance.
+ public ImagenModel GetImagenModel(
+ string modelName,
+ ImagenGenerationConfig? generationConfig = null,
+ ImagenSafetySettings? safetySettings = null,
+ RequestOptions? requestOptions = null)
+ {
+ return new ImagenModel(_firebaseApp, _backend, modelName,
+ generationConfig, safetySettings, requestOptions);
+ }
}
-}
}
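
A brief sketch of the entry point above: instances are cached per app-name/backend key, so repeated GetInstance calls with the same arguments return the same FirebaseAI object. The model name is an illustrative assumption.

using Firebase;
using Firebase.AI;

public static class FirebaseAIExample
{
  public static GenerativeModel CreateVertexModel()
  {
    // Explicitly select the Vertex AI backend; FirebaseAI.DefaultInstance would
    // instead use the default app with the Google AI backend.
    FirebaseAI ai = FirebaseAI.GetInstance(
        FirebaseApp.DefaultInstance,
        FirebaseAI.Backend.VertexAI(location: "us-central1"));

    return ai.GetGenerativeModel("gemini-2.0-flash");
  }
}
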
diff --git a/firebaseai/src/FunctionCalling.cs b/firebaseai/src/FunctionCalling.cs
index c02332ca..66e67359 100644
--- a/firebaseai/src/FunctionCalling.cs
+++ b/firebaseai/src/FunctionCalling.cs
@@ -19,268 +19,297 @@
using System.Linq;
using Firebase.AI.Internal;
-namespace Firebase.AI {
-
-///
-/// Structured representation of a function declaration.
-///
-/// This `FunctionDeclaration` is a representation of a block of code that can be used
-/// as a `Tool` by the model and executed by the client.
-///
-/// Function calling can be used to provide data to the model that was not known at the time it
-/// was trained (for example, the current date or weather conditions) or to allow it to interact
-/// with external systems (for example, making an API request or querying/updating a database).
-/// For more details and use cases, see [Introduction to function
-/// calling](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling).
-///
-public readonly struct FunctionDeclaration {
- // No public properties, on purpose since it is meant for user input only
-
- private string Name { get; }
- private string Description { get; }
- private Schema Parameters { get; }
-
+namespace Firebase.AI
+{
///
- /// Constructs a new `FunctionDeclaration`.
+ /// Structured representation of a function declaration.
+ ///
+ /// This `FunctionDeclaration` is a representation of a block of code that can be used
+ /// as a `Tool` by the model and executed by the client.
+ ///
+ /// Function calling can be used to provide data to the model that was not known at the time it
+ /// was trained (for example, the current date or weather conditions) or to allow it to interact
+ /// with external systems (for example, making an API request or querying/updating a database).
+ /// For more details and use cases, see [Introduction to function
+ /// calling](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling).
///
- /// The name of the function; must be a-z, A-Z, 0-9, or contain
- /// underscores and dashes, with a maximum length of 63.
- /// A brief description of the function.
- /// Describes the parameters to this function.
- /// The names of parameters that may be omitted by the model
- /// in function calls; by default, all parameters are considered required.
- public FunctionDeclaration(string name, string description,
- IDictionary parameters,
- IEnumerable optionalParameters = null) {
- Name = name;
- Description = description;
- Parameters = Schema.Object(parameters, optionalParameters);
- }
+ public readonly struct FunctionDeclaration
+ {
+ // No public properties, on purpose since it is meant for user input only
- ///
- /// Intended for internal use only.
- /// This method is used for serializing the object to JSON for the API request.
- ///
- internal Dictionary ToJson() {
- return new() {
+ private string Name { get; }
+ private string Description { get; }
+ private Schema Parameters { get; }
+
+ ///
+ /// Constructs a new `FunctionDeclaration`.
+ ///
+ /// The name of the function; must be a-z, A-Z, 0-9, or contain
+ /// underscores and dashes, with a maximum length of 63.
+ /// A brief description of the function.
+ /// Describes the parameters to this function.
+ /// The names of parameters that may be omitted by the model
+ /// in function calls; by default, all parameters are considered required.
+ public FunctionDeclaration(string name, string description,
+ IDictionary<string, Schema> parameters,
+ IEnumerable<string> optionalParameters = null)
+ {
+ Name = name;
+ Description = description;
+ Parameters = Schema.Object(parameters, optionalParameters);
+ }
+
+ ///
+ /// Intended for internal use only.
+ /// This method is used for serializing the object to JSON for the API request.
+ ///
+ internal Dictionary<string, object> ToJson()
+ {
+ return new() {
{ "name", Name },
{ "description", Description },
{ "parameters", Parameters.ToJson() },
};
+ }
}
-}
-
-///
-/// A tool that allows the generative model to connect to Google Search to access and incorporate
-/// up-to-date information from the web into its responses.
-///
-/// > Important: When using this feature, you are required to comply with the
-/// "Grounding with Google Search" usage requirements for your chosen API provider:
-/// [Gemini Developer API](https://ai.google.dev/gemini-api/terms#grounding-with-google-search)
-/// or Vertex AI Gemini API (see [Service Terms](https://cloud.google.com/terms/service-terms)
-/// section within the Service Specific Terms).
-///
-public readonly struct GoogleSearch {}
-
-///
-/// A tool that allows the model to execute code.
-///
-/// This tool can be used to solve complex problems, for example, by generating and executing Python
-/// code to solve a math problem.
-///
-public readonly struct CodeExecution {}
-
-///
-/// A helper tool that the model may use when generating responses.
-///
-/// A `Tool` is a piece of code that enables the system to interact with external systems to
-/// perform an action, or set of actions, outside of knowledge and scope of the model.
-///
-public readonly struct Tool {
- // No public properties, on purpose since it is meant for user input only
-
- private List FunctionDeclarations { get; }
- private GoogleSearch? GoogleSearch { get; }
- private CodeExecution? CodeExecution { get; }
- private UrlContext? UrlContext { get; }
///
- /// Creates a tool that allows the model to perform function calling.
- ///
- /// A list of `FunctionDeclarations` available to the model
- /// that can be used for function calling.
- public Tool(params FunctionDeclaration[] functionDeclarations) {
- FunctionDeclarations = new List(functionDeclarations);
- GoogleSearch = null;
- CodeExecution = null;
- UrlContext = null;
- }
- ///
- /// Creates a tool that allows the model to perform function calling.
+ /// A tool that allows the generative model to connect to Google Search to access and incorporate
+ /// up-to-date information from the web into its responses.
+ ///
+ /// > Important: When using this feature, you are required to comply with the
+ /// "Grounding with Google Search" usage requirements for your chosen API provider:
+ /// [Gemini Developer API](https://ai.google.dev/gemini-api/terms#grounding-with-google-search)
+ /// or Vertex AI Gemini API (see [Service Terms](https://cloud.google.com/terms/service-terms)
+ /// section within the Service Specific Terms).
///
- /// A list of `FunctionDeclarations` available to the model
- /// that can be used for function calling.
- public Tool(IEnumerable functionDeclarations) {
- FunctionDeclarations = new List(functionDeclarations);
- GoogleSearch = null;
- CodeExecution = null;
- UrlContext = null;
- }
+ public readonly struct GoogleSearch { }
///
- /// Creates a tool that allows the model to use Grounding with Google Search.
- ///
- /// An empty `GoogleSearch` object. The presence of this object
- /// in the list of tools enables the model to use Google Search.
- public Tool(GoogleSearch googleSearch) {
- FunctionDeclarations = null;
- GoogleSearch = googleSearch;
- CodeExecution = null;
- UrlContext = null;
- }
-
- ///
- /// Creates a tool that allows the model to use Code Execution.
- ///
- /// An empty `CodeExecution` object. The presence of this object
- /// in the list of tools enables the model to use Code Execution.
- public Tool(CodeExecution codeExecution) {
- FunctionDeclarations = null;
- GoogleSearch = null;
- CodeExecution = codeExecution;
- UrlContext = null;
- }
-
- ///
- /// Creates a tool that allows you to provide additional context to the models in the form of
- /// public web URLs.
+ /// A tool that allows the model to execute code.
+ ///
+ /// This tool can be used to solve complex problems, for example, by generating and executing Python
+ /// code to solve a math problem.
///
- /// An empty `UrlContext` object. The presence of this object
- /// in the list of tools enables the model to use Url Contexts.
- public Tool(UrlContext urlContext) {
- FunctionDeclarations = null;
- GoogleSearch = null;
- CodeExecution = null;
- UrlContext = urlContext;
- }
+ public readonly struct CodeExecution { }
///
- /// Intended for internal use only.
- /// This method is used for serializing the object to JSON for the API request.
+ /// A helper tool that the model may use when generating responses.
+ ///
+ /// A `Tool` is a piece of code that enables the system to interact with external systems to
+ /// perform an action, or set of actions, outside of knowledge and scope of the model.
///
- internal Dictionary ToJson() {
- var json = new Dictionary();
- if (FunctionDeclarations != null && FunctionDeclarations.Any()) {
- json["functionDeclarations"] = FunctionDeclarations.Select(f => f.ToJson()).ToList();
- }
- if (GoogleSearch.HasValue) {
- json["googleSearch"] = new Dictionary();
+ public readonly struct Tool
+ {
+ // No public properties, on purpose since it is meant for user input only
+
+ private List<FunctionDeclaration> FunctionDeclarations { get; }
+ private GoogleSearch? GoogleSearch { get; }
+ private CodeExecution? CodeExecution { get; }
+ private UrlContext? UrlContext { get; }
+
+ ///
+ /// Creates a tool that allows the model to perform function calling.
+ ///
+ /// A list of `FunctionDeclarations` available to the model
+ /// that can be used for function calling.
+ public Tool(params FunctionDeclaration[] functionDeclarations)
+ {
+ FunctionDeclarations = new List<FunctionDeclaration>(functionDeclarations);
+ GoogleSearch = null;
+ CodeExecution = null;
+ UrlContext = null;
}
- if (CodeExecution.HasValue) {
- json["codeExecution"] = new Dictionary();
+ ///
+ /// Creates a tool that allows the model to perform function calling.
+ ///
+ /// A list of `FunctionDeclarations` available to the model
+ /// that can be used for function calling.
+ public Tool(IEnumerable<FunctionDeclaration> functionDeclarations)
+ {
+ FunctionDeclarations = new List<FunctionDeclaration>(functionDeclarations);
+ GoogleSearch = null;
+ CodeExecution = null;
+ UrlContext = null;
}
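+ // Illustrative usage sketch (not part of this change): function calling. The declaration is
+ // assumed to have been built elsewhere; only the Tool constructors shown here come from this file.
+ //
+ //   FunctionDeclaration getWeather = /* built elsewhere */;
+ //   var functionTool = new Tool(getWeather);                               // params overload
+ //   var listTool = new Tool(new List<FunctionDeclaration> { getWeather }); // IEnumerable overload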
- if (UrlContext.HasValue) {
- json["urlContext"] = new Dictionary();
+
+ ///
+ /// Creates a tool that allows the model to use Grounding with Google Search.
+ ///
+ /// An empty `GoogleSearch` object. The presence of this object
+ /// in the list of tools enables the model to use Google Search.
+ public Tool(GoogleSearch googleSearch)
+ {
+ FunctionDeclarations = null;
+ GoogleSearch = googleSearch;
+ CodeExecution = null;
+ UrlContext = null;
}
- return json;
- }
-}
-///
-/// Tool configuration for any `Tool` specified in the request.
-///
-public readonly struct ToolConfig {
- // No public properties, on purpose since it is meant for user input only
+ ///
+ /// Creates a tool that allows the model to use Code Execution.
+ ///
+ /// An empty `CodeExecution` object. The presence of this object
+ /// in the list of tools enables the model to use Code Execution.
+ public Tool(CodeExecution codeExecution)
+ {
+ FunctionDeclarations = null;
+ GoogleSearch = null;
+ CodeExecution = codeExecution;
+ UrlContext = null;
+ }
- private FunctionCallingConfig? Config { get; }
+ ///
+ /// Creates a tool that allows you to provide additional context to the models in the form of
+ /// public web URLs.
+ ///
+ /// An empty `UrlContext` object. The presence of this object
+ /// in the list of tools enables the model to use Url Contexts.
+ public Tool(UrlContext urlContext)
+ {
+ FunctionDeclarations = null;
+ GoogleSearch = null;
+ CodeExecution = null;
+ UrlContext = urlContext;
+ }
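+ // Illustrative usage sketch (not part of this change): URL context lets prompts reference
+ // public web pages. Only the Tool construction is taken from this file.
+ //
+ //   var urlTool = new Tool(new UrlContext());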
- ///
- /// Constructs a new `ToolConfig`.
- ///
- /// Configures how the model should use the
- /// provided functions.
- public ToolConfig(FunctionCallingConfig? functionCallingConfig = null) {
- Config = functionCallingConfig;
+ ///
+ /// Intended for internal use only.
+ /// This method is used for serializing the object to JSON for the API request.
+ ///
+ internal Dictionary<string, object> ToJson()
+ {
+ var json = new Dictionary<string, object>();
+ if (FunctionDeclarations != null && FunctionDeclarations.Any())
+ {
+ json["functionDeclarations"] = FunctionDeclarations.Select(f => f.ToJson()).ToList();
+ }
+ if (GoogleSearch.HasValue)
+ {
+ json["googleSearch"] = new Dictionary();
+ }
+ if (CodeExecution.HasValue)
+ {
+ json["codeExecution"] = new Dictionary();
+ }
+ if (UrlContext.HasValue)
+ {
+ json["urlContext"] = new Dictionary();
+ }
+ return json;
+ }
}
///
- /// Intended for internal use only.
- /// This method is used for serializing the object to JSON for the API request.
+ /// Tool configuration for any `Tool` specified in the request.
///
- internal Dictionary ToJson() {
- var json = new Dictionary();
- if (Config.HasValue) {
- json["functionCallingConfig"] = Config?.ToJson();
- }
- return json;
- }
-}
+ public readonly struct ToolConfig
+ {
+ // No public properties, on purpose since it is meant for user input only
-///
-/// Configuration for specifying function calling behavior.
-///
-public readonly struct FunctionCallingConfig {
- // No public properties, on purpose since it is meant for user input only
+ private FunctionCallingConfig? Config { get; }
- private string Mode { get; }
- private List AllowedFunctionNames { get; }
+ ///
+ /// Constructs a new `ToolConfig`.
+ ///
+ /// Configures how the model should use the
+ /// provided functions.
+ public ToolConfig(FunctionCallingConfig? functionCallingConfig = null)
+ {
+ Config = functionCallingConfig;
+ }
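+ // Illustrative usage sketch (not part of this change): a ToolConfig wraps an optional
+ // FunctionCallingConfig (defined below) and is passed to the model alongside the tools;
+ // the exact model setup call is assumed.
+ //
+ //   var toolConfig = new ToolConfig(FunctionCallingConfig.Auto());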
- private FunctionCallingConfig(string mode, IEnumerable allowedFunctionNames = null) {
- Mode = mode;
- if (allowedFunctionNames != null) {
- AllowedFunctionNames = new List(allowedFunctionNames);
- } else {
- AllowedFunctionNames = null;
+ ///
+ /// Intended for internal use only.
+ /// This method is used for serializing the object to JSON for the API request.
+ ///
+ internal Dictionary<string, object> ToJson()
+ {
+ var json = new Dictionary<string, object>();
+ if (Config.HasValue)
+ {
+ json["functionCallingConfig"] = Config?.ToJson();
+ }
+ return json;
}
}
///
- /// Creates a function calling config where the model calls functions at its discretion.
- ///
- /// > Note: This is the default behavior.
+ /// Configuration for specifying function calling behavior.
///
- public static FunctionCallingConfig Auto() {
- return new FunctionCallingConfig("AUTO");
- }
+ public readonly struct FunctionCallingConfig
+ {
+ // No public properties, on purpose since it is meant for user input only
- ///
- /// Creates a function calling config where the model will always call a provided function.
- ///
- /// A set of function names that, when provided, limits the
- /// function that the model will call.
- public static FunctionCallingConfig Any(params string[] allowedFunctionNames) {
- return new FunctionCallingConfig("ANY", allowedFunctionNames);
- }
- ///
- /// Creates a function calling config where the model will always call a provided function.
- ///
- /// A set of function names that, when provided, limits the
- /// function that the model will call.
- public static FunctionCallingConfig Any(IEnumerable allowedFunctionNames) {
- return new FunctionCallingConfig("ANY", allowedFunctionNames);
- }
+ private string Mode { get; }
+ private List<string> AllowedFunctionNames { get; }
- /// Creates a function calling config where the model will never call a function.
- ///
- /// > Note: This can also be achieved by not passing any `FunctionDeclaration` tools when
- /// instantiating the model.
- public static FunctionCallingConfig None() {
- return new FunctionCallingConfig("NONE");
- }
+ private FunctionCallingConfig(string mode, IEnumerable<string> allowedFunctionNames = null)
+ {
+ Mode = mode;
+ if (allowedFunctionNames != null)
+ {
+ AllowedFunctionNames = new List<string>(allowedFunctionNames);
+ }
+ else
+ {
+ AllowedFunctionNames = null;
+ }
+ }
- ///
- /// Intended for internal use only.
- /// This method is used for serializing the object to JSON for the API request.
- ///
- internal Dictionary ToJson() {
- var json = new Dictionary() {
+ ///
+ /// Creates a function calling config where the model calls functions at its discretion.
+ ///
+ /// > Note: This is the default behavior.
+ ///
+ public static FunctionCallingConfig Auto()
+ {
+ return new FunctionCallingConfig("AUTO");
+ }
+
+ ///
+ /// Creates a function calling config where the model will always call a provided function.
+ ///
+ /// A set of function names that, when provided, limits the
+ /// functions that the model will call.
+ public static FunctionCallingConfig Any(params string[] allowedFunctionNames)
+ {
+ return new FunctionCallingConfig("ANY", allowedFunctionNames);
+ }
+ ///
+ /// Creates a function calling config where the model will always call a provided function.
+ ///
+ /// A set of function names that, when provided, limits the
+ /// functions that the model will call.
+ public static FunctionCallingConfig Any(IEnumerable<string> allowedFunctionNames)
+ {
+ return new FunctionCallingConfig("ANY", allowedFunctionNames);
+ }
+
+ /// Creates a function calling config where the model will never call a function.
+ ///
+ /// > Note: This can also be achieved by not passing any `FunctionDeclaration` tools when
+ /// instantiating the model.
+ public static FunctionCallingConfig None()
+ {
+ return new FunctionCallingConfig("NONE");
+ }
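+ // Illustrative usage sketch (not part of this change): the three function calling modes.
+ // "GetWeather" is a hypothetical function name used only for the example.
+ //
+ //   var auto = FunctionCallingConfig.Auto();            // model decides when to call functions (default)
+ //   var any = FunctionCallingConfig.Any("GetWeather");  // model must call one of the listed functions
+ //   var none = FunctionCallingConfig.None();            // model never calls functions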
+
+ ///
+ /// Intended for internal use only.
+ /// This method is used for serializing the object to JSON for the API request.
+ ///
+ internal Dictionary<string, object> ToJson()
+ {
+ var json = new Dictionary<string, object>() {
{ "mode", Mode }
};
- if (AllowedFunctionNames != null) {
- json["allowedFunctionNames"] = AllowedFunctionNames;
+ if (AllowedFunctionNames != null)
+ {
+ json["allowedFunctionNames"] = AllowedFunctionNames;
+ }
+ return json;
}
- return json;
}
-}
}
diff --git a/firebaseai/src/GenerateContentResponse.cs b/firebaseai/src/GenerateContentResponse.cs
index 6d1adae2..8dd167df 100644
--- a/firebaseai/src/GenerateContentResponse.cs
+++ b/firebaseai/src/GenerateContentResponse.cs
@@ -20,525 +20,582 @@
using Google.MiniJSON;
using Firebase.AI.Internal;
-namespace Firebase.AI {
-
-///
-/// The model's response to a generate content request.
-///
-public readonly struct GenerateContentResponse {
- private readonly IReadOnlyList _candidates;
-
+namespace Firebase.AI
+{
///
- /// A list of candidate response content, ordered from best to worst.
+ /// The model's response to a generate content request.
///
- public IReadOnlyList Candidates {
- get {
- return _candidates ?? new List();
- }
- }
+ public readonly struct GenerateContentResponse
+ {
+ private readonly IReadOnlyList<Candidate> _candidates;
- ///
- /// A value containing the safety ratings for the response, or,
- /// if the request was blocked, a reason for blocking the request.
- ///
- public PromptFeedback? PromptFeedback { get; }
-
- ///
- /// Token usage metadata for processing the generate content request.
- ///
- public UsageMetadata? UsageMetadata { get; }
+ ///
+ /// A list of candidate response content, ordered from best to worst.
+ ///
+ public IReadOnlyList<Candidate> Candidates
+ {
+ get
+ {
+ return _candidates ?? new List<Candidate>();
+ }
+ }
- ///
- /// The response's content as text, if it exists.
- ///
- public string Text {
- get {
- // Concatenate all of the text parts that aren't thoughts from the first candidate.
- return string.Join(" ",
- Candidates.FirstOrDefault().Content.Parts
- .OfType().Where(tp => !tp.IsThought).Select(tp => tp.Text));
+ ///
+ /// A value containing the safety ratings for the response, or,
+ /// if the request was blocked, a reason for blocking the request.
+ ///
+ public PromptFeedback? PromptFeedback { get; }
+
+ ///
+ /// Token usage metadata for processing the generate content request.
+ ///
+ public UsageMetadata? UsageMetadata { get; }
+
+ ///
+ /// The response's content as text, if it exists.
+ ///
+ public string Text
+ {
+ get
+ {
+ // Concatenate all of the text parts that aren't thoughts from the first candidate.
+ return string.Join(" ",
+ Candidates.FirstOrDefault().Content.Parts
+ .OfType().Where(tp => !tp.IsThought).Select(tp => tp.Text));
+ }
}
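+ // Illustrative usage sketch (not part of this change): reading the concatenated text of the
+ // first candidate. The GenerateContentAsync call and the prompt are assumptions about the
+ // calling code.
+ //
+ //   var response = await model.GenerateContentAsync("Write a haiku about the sea.");
+ //   UnityEngine.Debug.Log(response.Text);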
- }
-
- ///
- /// A summary of the model's thinking process, if available.
- ///
- /// Note that Thought Summaries are only available when `IncludeThoughts` is enabled
- /// in the `ThinkingConfig`. For more information, see the
- /// [Thinking](https://firebase.google.com/docs/ai-logic/thinking) documentation.
- ///
- public string ThoughtSummary {
- get {
- // Concatenate all of the text parts that are thoughts from the first candidate.
- return string.Join(" ",
- Candidates.FirstOrDefault().Content.Parts
- .OfType().Where(tp => tp.IsThought).Select(tp => tp.Text));
+
+ ///
+ /// A summary of the model's thinking process, if available.
+ ///
+ /// Note that Thought Summaries are only available when `IncludeThoughts` is enabled
+ /// in the `ThinkingConfig`. For more information, see the
+ /// [Thinking](https://firebase.google.com/docs/ai-logic/thinking) documentation.
+ ///
+ public string ThoughtSummary
+ {
+ get
+ {
+ // Concatenate all of the text parts that are thoughts from the first candidate.
+ return string.Join(" ",
+ Candidates.FirstOrDefault().Content.Parts
+ .OfType().Where(tp => tp.IsThought).Select(tp => tp.Text));
+ }
}
- }
- ///
- /// Returns function calls found in any `Part`s of the first candidate of the response, if any.
- ///
- public IReadOnlyList FunctionCalls {
- get {
- return Candidates.FirstOrDefault().Content.Parts
- .OfType().Where(tp => !tp.IsThought).ToList();
+ ///
+ /// Returns function calls found in any `Part`s of the first candidate of the response, if any.
+ ///
+ public IReadOnlyList FunctionCalls
+ {
+ get
+ {
+ return Candidates.FirstOrDefault().Content.Parts
+ .OfType().Where(tp => !tp.IsThought).ToList();
+ }
}
- }
- // Hidden constructor, users don't need to make this.
- private GenerateContentResponse(List candidates, PromptFeedback? promptFeedback,
- UsageMetadata? usageMetadata) {
- _candidates = candidates;
- PromptFeedback = promptFeedback;
- UsageMetadata = usageMetadata;
- }
+ // Hidden constructor, users don't need to make this.
+ private GenerateContentResponse(List<Candidate> candidates, PromptFeedback? promptFeedback,
+ UsageMetadata? usageMetadata)
+ {
+ _candidates = candidates;
+ PromptFeedback = promptFeedback;
+ UsageMetadata = usageMetadata;
+ }
- ///
- /// Intended for internal use only.
- /// This method is used for deserializing JSON responses and should not be called directly.
- ///
- internal static GenerateContentResponse FromJson(string jsonString,
- FirebaseAI.Backend.InternalProvider backend) {
- return FromJson(Json.Deserialize(jsonString) as Dictionary, backend);
- }
+ ///
+ /// Intended for internal use only.
+ /// This method is used for deserializing JSON responses and should not be called directly.
+ ///
+ internal static GenerateContentResponse FromJson(string jsonString,
+ FirebaseAI.Backend.InternalProvider backend)
+ {
+ return FromJson(Json.Deserialize(jsonString) as Dictionary<string, object>, backend);
+ }
- ///
- /// Intended for internal use only.
- /// This method is used for deserializing JSON responses and should not be called directly.
- ///
- internal static GenerateContentResponse FromJson(Dictionary jsonDict,
- FirebaseAI.Backend.InternalProvider backend) {
- return new GenerateContentResponse(
- jsonDict.ParseObjectList("candidates", (d) => Candidate.FromJson(d, backend)),
- jsonDict.ParseNullableObject("promptFeedback",
- Firebase.AI.PromptFeedback.FromJson),
- jsonDict.ParseNullableObject("usageMetadata",
- Firebase.AI.UsageMetadata.FromJson));
+ ///
+ /// Intended for internal use only.
+ /// This method is used for deserializing JSON responses and should not be called directly.
+ ///
+ internal static GenerateContentResponse FromJson(Dictionary<string, object> jsonDict,
+ FirebaseAI.Backend.InternalProvider backend)
+ {
+ return new GenerateContentResponse(
+ jsonDict.ParseObjectList("candidates", (d) => Candidate.FromJson(d, backend)),
+ jsonDict.ParseNullableObject("promptFeedback",
+ Firebase.AI.PromptFeedback.FromJson),
+ jsonDict.ParseNullableObject("usageMetadata",
+ Firebase.AI.UsageMetadata.FromJson));
+ }
}
-}
-///
-/// A type describing possible reasons to block a prompt.
-///
-public enum BlockReason {
///
- /// A new and not yet supported value.
- ///
- Unknown = 0,
- ///
- /// The prompt was blocked because it was deemed unsafe.
- ///
- Safety,
- ///
- /// All other block reasons.
- ///
- Other,
- ///
- /// The prompt was blocked because it contained terms from the terminology blocklist.
- ///
- Blocklist,
- ///
- /// The prompt was blocked due to prohibited content.
- ///
- ProhibitedContent,
-}
-
-///
-/// A metadata struct containing any feedback the model had on the prompt it was provided.
-///
-public readonly struct PromptFeedback {
- private readonly IReadOnlyList _safetyRatings;
-
- ///
- /// The reason a prompt was blocked, if it was blocked.
- ///
- public BlockReason? BlockReason { get; }
- ///
- /// A human-readable description of the `BlockReason`.
- ///
- public string BlockReasonMessage { get; }
- ///
- /// The safety ratings of the prompt.
- ///
- public IReadOnlyList SafetyRatings {
- get {
- return _safetyRatings ?? new List();
+ /// A type describing possible reasons to block a prompt.
+ ///
+ public enum BlockReason
+ {
+ ///
+ /// A new and not yet supported value.
+ ///
+ Unknown = 0,
+ ///
+ /// The prompt was blocked because it was deemed unsafe.
+ ///
+ Safety,
+ ///
+ /// All other block reasons.
+ ///
+ Other,
+ ///
+ /// The prompt was blocked because it contained terms from the terminology blocklist.
+ ///
+ Blocklist,
+ ///
+ /// The prompt was blocked due to prohibited content.
+ ///
+ ProhibitedContent,
+ }
+
+ ///
+ /// A metadata struct containing any feedback the model had on the prompt it was provided.
+ ///
+ public readonly struct PromptFeedback
+ {
+ private readonly IReadOnlyList<SafetyRating> _safetyRatings;
+
+ ///
+ /// The reason a prompt was blocked, if it was blocked.
+ ///
+ public BlockReason? BlockReason { get; }
+ ///
+ /// A human-readable description of the `BlockReason`.
+ ///
+ public string BlockReasonMessage { get; }
+ ///
+ /// The safety ratings of the prompt.
+ ///
+ public IReadOnlyList<SafetyRating> SafetyRatings
+ {
+ get
+ {
+ return _safetyRatings ?? new List<SafetyRating>();
+ }
}
- }
- // Hidden constructor, users don't need to make this.
- private PromptFeedback(BlockReason? blockReason, string blockReasonMessage,
- List safetyRatings) {
- BlockReason = blockReason;
- BlockReasonMessage = blockReasonMessage;
- _safetyRatings = safetyRatings;
- }
+ // Hidden constructor, users don't need to make this.
+ private PromptFeedback(BlockReason? blockReason, string blockReasonMessage,
+ List<SafetyRating> safetyRatings)
+ {
+ BlockReason = blockReason;
+ BlockReasonMessage = blockReasonMessage;
+ _safetyRatings = safetyRatings;
+ }
- private static BlockReason ParseBlockReason(string str) {
- return str switch {
- "SAFETY" => Firebase.AI.BlockReason.Safety,
- "OTHER" => Firebase.AI.BlockReason.Other,
- "BLOCKLIST" => Firebase.AI.BlockReason.Blocklist,
- "PROHIBITED_CONTENT" => Firebase.AI.BlockReason.ProhibitedContent,
- _ => Firebase.AI.BlockReason.Unknown,
- };
- }
+ private static BlockReason ParseBlockReason(string str)
+ {
+ return str switch
+ {
+ "SAFETY" => Firebase.AI.BlockReason.Safety,
+ "OTHER" => Firebase.AI.BlockReason.Other,
+ "BLOCKLIST" => Firebase.AI.BlockReason.Blocklist,
+ "PROHIBITED_CONTENT" => Firebase.AI.BlockReason.ProhibitedContent,
+ _ => Firebase.AI.BlockReason.Unknown,
+ };
+ }
- ///
- /// Intended for internal use only.
- /// This method is used for deserializing JSON responses and should not be called directly.
- ///
- internal static PromptFeedback FromJson(Dictionary jsonDict) {
- return new PromptFeedback(
- jsonDict.ParseNullableEnum("blockReason", ParseBlockReason),
- jsonDict.ParseValue("blockReasonMessage"),
- jsonDict.ParseObjectList("safetyRatings", SafetyRating.FromJson));
+ ///
+ /// Intended for internal use only.
+ /// This method is used for deserializing JSON responses and should not be called directly.
+ ///
+ internal static PromptFeedback FromJson(Dictionary<string, object> jsonDict)
+ {
+ return new PromptFeedback(
+ jsonDict.ParseNullableEnum("blockReason", ParseBlockReason),
+ jsonDict.ParseValue("blockReasonMessage"),
+ jsonDict.ParseObjectList("safetyRatings", SafetyRating.FromJson));
+ }
}
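+ // Illustrative usage sketch (not part of this change): checking whether the prompt was
+ // blocked before reading candidates. `response` is assumed to be a GenerateContentResponse.
+ //
+ //   if (response.PromptFeedback?.BlockReason is BlockReason reason)
+ //   {
+ //     UnityEngine.Debug.LogWarning(
+ //         $"Prompt blocked: {reason}. {response.PromptFeedback?.BlockReasonMessage}");
+ //   }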
-}
-
-///
-/// Metadata returned to the client when grounding is enabled.
-///
-/// > Important: If using Grounding with Google Search, you are required to comply with the
-/// "Grounding with Google Search" usage requirements for your chosen API provider:
-/// [Gemini Developer API](https://ai.google.dev/gemini-api/terms#grounding-with-google-search)
-/// or Vertex AI Gemini API (see [Service Terms](https://cloud.google.com/terms/service-terms)
-/// section within the Service Specific Terms).
-///
-public readonly struct GroundingMetadata {
- private readonly IReadOnlyList _webSearchQueries;
- private readonly IReadOnlyList _groundingChunks;
- private readonly IReadOnlyList _groundingSupports;
///
- /// A list of web search queries that the model performed to gather the grounding information.
- /// These can be used to allow users to explore the search results themselves.
- ///
- public IReadOnlyList WebSearchQueries {
- get {
- return _webSearchQueries ?? new List();
+ /// Metadata returned to the client when grounding is enabled.
+ ///
+ /// > Important: If using Grounding with Google Search, you are required to comply with the
+ /// "Grounding with Google Search" usage requirements for your chosen API provider:
+ /// [Gemini Developer API](https://ai.google.dev/gemini-api/terms#grounding-with-google-search)
+ /// or Vertex AI Gemini API (see [Service Terms](https://cloud.google.com/terms/service-terms)
+ /// section within the Service Specific Terms).
+ ///
+ public readonly struct GroundingMetadata
+ {
+ private readonly IReadOnlyList<string> _webSearchQueries;
+ private readonly IReadOnlyList<GroundingChunk> _groundingChunks;
+ private readonly IReadOnlyList<GroundingSupport> _groundingSupports;
+
+ ///
+ /// A list of web search queries that the model performed to gather the grounding information.
+ /// These can be used to allow users to explore the search results themselves.
+ ///
+ public IReadOnlyList<string> WebSearchQueries
+ {
+ get
+ {
+ return _webSearchQueries ?? new List<string>();
+ }
}
- }
- ///
- /// A list of `GroundingChunk` structs. Each chunk represents a piece of retrieved content
- /// (e.g., from a web page) that the model used to ground its response.
- ///
- public IReadOnlyList GroundingChunks {
- get {
- return _groundingChunks ?? new List();
+ ///
+ /// A list of `GroundingChunk` structs. Each chunk represents a piece of retrieved content
+ /// (e.g., from a web page) that the model used to ground its response.
+ ///
+ public IReadOnlyList<GroundingChunk> GroundingChunks
+ {
+ get
+ {
+ return _groundingChunks ?? new List<GroundingChunk>();
+ }
}
- }
- ///
- /// A list of `GroundingSupport` structs. Each object details how specific segments of the
- /// model's response are supported by the `groundingChunks`.
- ///
- public IReadOnlyList GroundingSupports {
- get {
- return _groundingSupports ?? new List();
+ ///
+ /// A list of `GroundingSupport` structs. Each object details how specific segments of the
+ /// model's response are supported by the `groundingChunks`.
+ ///
+ public IReadOnlyList<GroundingSupport> GroundingSupports
+ {
+ get
+ {
+ return _groundingSupports ?? new List<GroundingSupport>();
+ }
}
- }
- ///
- /// Google Search entry point for web searches.
- /// This contains an HTML/CSS snippet that **must** be embedded in an app to display a Google
- /// Search entry point for follow-up web searches related to the model's "Grounded Response".
- ///
- public SearchEntryPoint? SearchEntryPoint { get; }
-
- private GroundingMetadata(List webSearchQueries, List groundingChunks,
- List groundingSupports, SearchEntryPoint? searchEntryPoint) {
- _webSearchQueries = webSearchQueries;
- _groundingChunks = groundingChunks;
- _groundingSupports = groundingSupports;
- SearchEntryPoint = searchEntryPoint;
- }
+ ///
+ /// Google Search entry point for web searches.
+ /// This contains an HTML/CSS snippet that **must** be embedded in an app to display a Google
+ /// Search entry point for follow-up web searches related to the model's "Grounded Response".
+ ///
+ public SearchEntryPoint? SearchEntryPoint { get; }
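+ // Illustrative usage sketch (not part of this change): surfacing grounding metadata.
+ // The GroundingMetadata property on Candidate is an assumption about the surrounding API.
+ //
+ //   var grounding = response.Candidates.FirstOrDefault().GroundingMetadata;
+ //   if (grounding?.SearchEntryPoint is SearchEntryPoint entryPoint)
+ //   {
+ //     // Embed the returned HTML/CSS snippet in the app, as required by the grounding terms.
+ //   }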
- internal static GroundingMetadata FromJson(Dictionary jsonDict) {
- List supports = null;
- if (jsonDict.TryParseValue("groundingSupports", out List