252 changes: 130 additions & 122 deletions firebaseai/src/Candidate.cs
@@ -17,140 +17,148 @@
using System.Collections.Generic;
using Firebase.AI.Internal;

namespace Firebase.AI
{
  /// <summary>
  /// Represents the reason why the model stopped generating content.
  /// </summary>
  public enum FinishReason
  {
    /// <summary>
    /// A new and not yet supported value.
    /// </summary>
    Unknown = 0,
    /// <summary>
    /// Natural stop point of the model or provided stop sequence.
    /// </summary>
    Stop,
    /// <summary>
    /// The maximum number of tokens as specified in the request was reached.
    /// </summary>
    MaxTokens,
    /// <summary>
    /// The token generation was stopped because the response was flagged for safety reasons.
    /// </summary>
    Safety,
    /// <summary>
    /// The token generation was stopped because the response was flagged for unauthorized citations.
    /// </summary>
    Recitation,
    /// <summary>
    /// All other reasons that stopped token generation.
    /// </summary>
    Other,
    /// <summary>
    /// Token generation was stopped because the response contained forbidden terms.
    /// </summary>
    Blocklist,
    /// <summary>
    /// Token generation was stopped because the response contained potentially prohibited content.
    /// </summary>
    ProhibitedContent,
    /// <summary>
    /// Token generation was stopped because of Sensitive Personally Identifiable Information (SPII).
    /// </summary>
    SPII,
    /// <summary>
    /// Token generation was stopped because the function call generated by the model was invalid.
    /// </summary>
    MalformedFunctionCall,
  }

  /// <summary>
  /// A struct representing a possible reply to a content generation prompt.
  /// Each content generation prompt may produce multiple candidate responses.
  /// </summary>
  public readonly struct Candidate
  {
    private readonly IReadOnlyList<SafetyRating> _safetyRatings;

    /// <summary>
    /// The response’s content.
    /// </summary>
    public ModelContent Content { get; }

    /// <summary>
    /// The safety rating of the response content.
    /// </summary>
    public IReadOnlyList<SafetyRating> SafetyRatings
    {
      get
      {
        return _safetyRatings ?? new List<SafetyRating>();
      }
    }

    /// <summary>
    /// The reason the model stopped generating content, if it exists;
    /// for example, if the model generated a predefined stop sequence.
    /// </summary>
    public FinishReason? FinishReason { get; }

    /// <summary>
    /// Cited works in the model’s response content, if it exists.
    /// </summary>
    public CitationMetadata? CitationMetadata { get; }

    /// <summary>
    /// Grounding metadata for the response, if any.
    /// </summary>
    public GroundingMetadata? GroundingMetadata { get; }

    /// <summary>
    /// Metadata related to the `URLContext` tool.
    /// </summary>
    public UrlContextMetadata? UrlContextMetadata { get; }

    // Hidden constructor, users don't need to make this.
    private Candidate(ModelContent content, List<SafetyRating> safetyRatings,
        FinishReason? finishReason, CitationMetadata? citationMetadata,
        GroundingMetadata? groundingMetadata, UrlContextMetadata? urlContextMetadata)
    {
      Content = content;
      _safetyRatings = safetyRatings ?? new List<SafetyRating>();
      FinishReason = finishReason;
      CitationMetadata = citationMetadata;
      GroundingMetadata = groundingMetadata;
      UrlContextMetadata = urlContextMetadata;
    }

    private static FinishReason ParseFinishReason(string str)
    {
      return str switch
      {
        "STOP" => Firebase.AI.FinishReason.Stop,
        "MAX_TOKENS" => Firebase.AI.FinishReason.MaxTokens,
        "SAFETY" => Firebase.AI.FinishReason.Safety,
        "RECITATION" => Firebase.AI.FinishReason.Recitation,
        "OTHER" => Firebase.AI.FinishReason.Other,
        "BLOCKLIST" => Firebase.AI.FinishReason.Blocklist,
        "PROHIBITED_CONTENT" => Firebase.AI.FinishReason.ProhibitedContent,
        "SPII" => Firebase.AI.FinishReason.SPII,
        "MALFORMED_FUNCTION_CALL" => Firebase.AI.FinishReason.MalformedFunctionCall,
        _ => Firebase.AI.FinishReason.Unknown,
      };
    }

    /// <summary>
    /// Intended for internal use only.
    /// This method is used for deserializing JSON responses and should not be called directly.
    /// </summary>
    internal static Candidate FromJson(Dictionary<string, object> jsonDict,
        FirebaseAI.Backend.InternalProvider backend)
    {
      return new Candidate(
          jsonDict.ParseObject("content", ModelContent.FromJson, defaultValue: new ModelContent("model")),
          jsonDict.ParseObjectList("safetyRatings", SafetyRating.FromJson),
          jsonDict.ParseNullableEnum("finishReason", ParseFinishReason),
          jsonDict.ParseNullableObject("citationMetadata",
              (d) => Firebase.AI.CitationMetadata.FromJson(d, backend)),
          jsonDict.ParseNullableObject("groundingMetadata",
              Firebase.AI.GroundingMetadata.FromJson),
          jsonDict.ParseNullableObject("urlContextMetadata",
              Firebase.AI.UrlContextMetadata.FromJson));
    }
  }
}
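
A minimal consumption sketch of the Candidate API above, not part of the diff itself. It assumes the SDK's usual FirebaseAI.DefaultInstance / GetGenerativeModel / GenerateContentAsync entry points and an illustrative model name; only Candidate, FinishReason, SafetyRating, and the SafetyRatings empty-list fallback come from this file.

using System.Threading.Tasks;
using UnityEngine;
using Firebase.AI;

public class CandidateExample : MonoBehaviour
{
  private async Task CheckCandidatesAsync()
  {
    // Entry points and model name are assumptions for illustration,
    // not part of the diff above.
    var model = FirebaseAI.DefaultInstance.GetGenerativeModel("gemini-2.0-flash");
    var response = await model.GenerateContentAsync("Write a haiku about the sea.");

    foreach (Candidate candidate in response.Candidates)
    {
      // FinishReason is nullable and may be absent, e.g. mid-stream.
      if (candidate.FinishReason == FinishReason.Safety)
      {
        Debug.Log("Candidate generation was stopped for safety reasons.");
      }

      // SafetyRatings never returns null; the getter falls back to an empty list.
      foreach (SafetyRating rating in candidate.SafetyRatings)
      {
        Debug.Log($"Safety rating: {rating}");
      }
    }
  }
}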