diff --git a/packages/firebase_vertexai/firebase_vertexai/lib/firebase_vertexai.dart b/packages/firebase_vertexai/firebase_vertexai/lib/firebase_vertexai.dart index b8f518d352ff..c1d48fd702b4 100644 --- a/packages/firebase_vertexai/firebase_vertexai/lib/firebase_vertexai.dart +++ b/packages/firebase_vertexai/firebase_vertexai/lib/firebase_vertexai.dart @@ -12,26 +12,58 @@ // See the License for the specific language governing permissions and // limitations under the License. -library firebase_vertexai; - -import 'dart:async'; -import 'dart:convert'; -import 'dart:typed_data'; - -import 'package:firebase_app_check/firebase_app_check.dart'; -import 'package:firebase_auth/firebase_auth.dart'; -import 'package:firebase_core/firebase_core.dart'; -import 'package:firebase_core_platform_interface/firebase_core_platform_interface.dart' - show FirebasePluginPlatform; -import 'package:google_generative_ai/google_generative_ai.dart' as google_ai; -// ignore: implementation_imports, tightly coupled packages -import 'package:google_generative_ai/src/vertex_hooks.dart'; - -import 'src/vertex_version.dart'; - -part 'src/firebase_vertexai.dart'; -part 'src/vertex_api.dart'; -part 'src/vertex_chat.dart'; -part 'src/vertex_content.dart'; -part 'src/vertex_function_calling.dart'; -part 'src/vertex_model.dart'; +export 'src/firebase_vertexai.dart' + show + // TODO(next breaking): Remove defaultTimeout + defaultTimeout, + FirebaseVertexAI, + RequestOptions; +export 'src/vertex_api.dart' + show + BatchEmbedContentsResponse, + BlockReason, + Candidate, + CitationMetadata, + CitationSource, + ContentEmbedding, + CountTokensResponse, + // TODO(next breaking): Remove CountTokensResponseFields + CountTokensResponseFields, + EmbedContentRequest, + EmbedContentResponse, + FinishReason, + GenerateContentResponse, + GenerationConfig, + HarmBlockThreshold, + HarmCategory, + HarmProbability, + PromptFeedback, + SafetyRating, + SafetySetting, + TaskType, + // TODO(next breaking): Remove parse* methods + parseCountTokensResponse, + parseEmbedContentResponse, + parseGenerateContentResponse; +export 'src/vertex_chat.dart' show ChatSession, StartChatExtension; +export 'src/vertex_content.dart' + show + Content, + DataPart, + FileData, + FunctionCall, + FunctionResponse, + Part, + TextPart, + // TODO(next breaking): Remove parseContent + parseContent; +export 'src/vertex_function_calling.dart' + show + FunctionCallingConfig, + FunctionCallingMode, + FunctionDeclaration, + Schema, + SchemaType, + Tool, + ToolConfig; +export 'src/vertex_model.dart' show GenerativeModel; diff --git a/packages/firebase_vertexai/firebase_vertexai/lib/src/firebase_vertexai.dart b/packages/firebase_vertexai/firebase_vertexai/lib/src/firebase_vertexai.dart index a12a99ef5d1d..d0ebc258046f 100644 --- a/packages/firebase_vertexai/firebase_vertexai/lib/src/firebase_vertexai.dart +++ b/packages/firebase_vertexai/firebase_vertexai/lib/src/firebase_vertexai.dart @@ -12,7 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
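With the barrel rewritten as `export … show` lists, the public surface is pinned explicitly: everything apps could use before is still exported, while the helpers that had to become public for the library split (the `toVertex()` / `toGoogleAI()` conversion extensions and `createGenerativeModel` added later in this diff) are simply left off the lists. A minimal consumer-side sketch of what that means:

```dart
// Consumer-side sketch: the `show` clauses above keep the package API the
// same as before the refactor, even though lib/src/ now declares extra
// public helpers.
import 'package:firebase_vertexai/firebase_vertexai.dart';

void summarize(GenerateContentResponse response) {
  // Exported types resolve exactly as they did with the old `part` layout.
  for (final Candidate candidate in response.candidates) {
    print('${candidate.content.role}: ${candidate.finishReason}');
  }
  // The new conversion extensions (e.g. toVertex() on the google_ai types)
  // and createGenerativeModel are *not* in the `show` lists, so they stay
  // internal to the package even though they are public inside lib/src/.
}
```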
-part of firebase_vertexai; +import 'package:firebase_app_check/firebase_app_check.dart'; +import 'package:firebase_auth/firebase_auth.dart'; +import 'package:firebase_core/firebase_core.dart'; +import 'package:firebase_core_platform_interface/firebase_core_platform_interface.dart' + show FirebasePluginPlatform; + +import 'vertex_api.dart'; +import 'vertex_content.dart'; +import 'vertex_function_calling.dart'; +import 'vertex_model.dart'; const _defaultLocation = 'us-central1'; @@ -105,7 +114,7 @@ class FirebaseVertexAI extends FirebasePluginPlatform { Content? systemInstruction, List? tools, ToolConfig? toolConfig}) { - return GenerativeModel._( + return createGenerativeModel( model: model, app: app, appCheck: appCheck, diff --git a/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_api.dart b/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_api.dart index 88ec0b104220..5e8c7c4110de 100644 --- a/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_api.dart +++ b/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_api.dart @@ -12,19 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -part of firebase_vertexai; +import 'package:google_generative_ai/google_generative_ai.dart' as google_ai; +// ignore: implementation_imports, tightly coupled packages +import 'package:google_generative_ai/src/vertex_hooks.dart'; + +import 'vertex_content.dart'; /// Response for Count Tokens final class CountTokensResponse { /// Constructor CountTokensResponse(this.totalTokens, {this.totalBillableCharacters}); - factory CountTokensResponse._fromGoogleAICountTokensResponse( - google_ai.CountTokensResponse countTokensResponse) { - return CountTokensResponse( - countTokensResponse.totalTokens, - totalBillableCharacters: countTokensResponse.totalBillableCharacters, - ); - } /// The number of tokens that the `model` tokenizes the `prompt` into. /// @@ -37,6 +34,16 @@ final class CountTokensResponse { final int? totalBillableCharacters; } +/// Conversion utilities for [google_ai.CountTokensResponse]. +extension GoogleAICountTokensResponseConversion + on google_ai.CountTokensResponse { + /// Returns this response as a [CountTokensResponse]. + CountTokensResponse toVertex() => CountTokensResponse( + totalTokens, + totalBillableCharacters: totalBillableCharacters, + ); +} + /// Extension on [google_ai.CountTokensResponse] to access extra fields extension CountTokensResponseFields on google_ai.CountTokensResponse { /// Total billable Characters for the prompt. @@ -49,35 +56,12 @@ final class GenerateContentResponse { /// Constructor GenerateContentResponse(this.candidates, this.promptFeedback); - factory GenerateContentResponse._fromGoogleAIGenerateContentResponse( - google_ai.GenerateContentResponse generateContentResponse) => - GenerateContentResponse( - generateContentResponse.candidates - .map(Candidate._fromGoogleAICandidate) - .toList(), - generateContentResponse.promptFeedback != null - ? PromptFeedback._fromGoogleAIPromptFeedback( - generateContentResponse.promptFeedback!) - : null); - /// Candidate responses from the model. final List candidates; /// Returns the prompt's feedback related to the content filters. final PromptFeedback? promptFeedback; - /// Converts this response to a [GenerateContentResponse]. 
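Dropping `part of` means the `lib/src/` libraries can no longer reach each other's private `_fromGoogleAI…` / `_toGoogleAI…` members, so this PR turns those converters into public extensions that the barrel never exports. A hypothetical sketch of the pattern with placeholder types (not the real API):

```dart
// Hypothetical sketch of the refactor pattern used throughout this PR.
// Before (one library with parts): a private factory did the conversion.
//   factory VertexThing._fromGoogleThing(GoogleThing t) => ...
// After (separate libraries): a public extension on the *external* type does
// it, and the barrel file simply never exports the extension.

class GoogleThing {
  GoogleThing(this.totalTokens);
  final int totalTokens;
}

class VertexThing {
  VertexThing(this.totalTokens);
  final int totalTokens;
}

extension GoogleThingConversion on GoogleThing {
  /// Returns this [GoogleThing] as a [VertexThing].
  VertexThing toVertex() => VertexThing(totalTokens);
}

void main() {
  final vertex = GoogleThing(42).toVertex();
  print(vertex.totalTokens); // 42
}
```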
- - // ignore: unused_element - google_ai.GenerateContentResponse _toGoogleAIGenerateContentResponse() => - google_ai.GenerateContentResponse( - candidates - .map( - (candidate) => candidate._toGoogleAICandidate(), - ) - .toList(), - promptFeedback?._toGoogleAIPromptFeedback()); - /// The text content of the first part of the first of [candidates], if any. /// /// If the prompt was blocked, or the first candidate was finished for a reason @@ -133,22 +117,30 @@ final class GenerateContentResponse { const []; } +/// Conversion utilities for [google_ai.GenerateContentResponse]. +extension GoogleAIGenerateContentResponseConversion + on google_ai.GenerateContentResponse { + /// Returns this response as a [GenerateContentResponse]. + GenerateContentResponse toVertex() => GenerateContentResponse( + candidates.map((c) => c.toVertex()).toList(), + promptFeedback?.toVertex(), + ); +} + /// Response for Embed Content. final class EmbedContentResponse { /// Constructor EmbedContentResponse(this.embedding); - factory EmbedContentResponse._fromGoogleAIEmbedContentResponse( - google_ai.EmbedContentResponse embedContentResponse) => - EmbedContentResponse(ContentEmbedding._fromGoogleAIContentEmbedding( - embedContentResponse.embedding)); /// The embedding generated from the input content. final ContentEmbedding embedding; +} - /// Converts this response to a [EmbedContentResponse]. - // ignore: unused_element - google_ai.EmbedContentResponse _toGoogleAIEmbedContentResponse() => - google_ai.EmbedContentResponse(embedding._toGoogleAIContentEmbedding()); +/// Conversion utilities for [google_ai.EmbedContentResponse]. +extension GoogleAIEmbedContentResponseConversion + on google_ai.EmbedContentResponse { + /// Returns this response as a [EmbedContentResponse]. + EmbedContentResponse toVertex() => EmbedContentResponse(embedding.toVertex()); } /// Response for Embed Content in batch. @@ -156,17 +148,19 @@ final class BatchEmbedContentsResponse { /// Constructor BatchEmbedContentsResponse(this.embeddings); - factory BatchEmbedContentsResponse._fromGoogleAIBatchEmbedContentsResponse( - google_ai.BatchEmbedContentsResponse embedContentResponse) => - BatchEmbedContentsResponse(embedContentResponse.embeddings - .map(ContentEmbedding._fromGoogleAIContentEmbedding) - .toList()); - /// The embeddings generated from the input content for each request, in the /// same order as provided in the batch request. final List embeddings; } +/// Conversion utilities for [google_ai.BatchEmbedContentsResponse]. +extension GoogleAIBatchEmbedContentsResponseConversion + on google_ai.BatchEmbedContentsResponse { + /// Returns this response as a [BatchEmbedContentsResponse]. + BatchEmbedContentsResponse toVertex() => + BatchEmbedContentsResponse(embeddings.map((e) => e.toVertex()).toList()); +} + /// Request for Embed Content. final class EmbedContentRequest { /// Constructor @@ -191,47 +185,35 @@ final class EmbedContentRequest { if (title != null) 'title': title, if (model ?? defaultModel case final model?) 'model': model, }; +} +/// Conversion utilities for [EmbedContentRequest]. +extension EmbedContentRequestConversion on EmbedContentRequest { /// Converts this response to a [EmbedContentResponse]. 
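Each `toVertex()` delegates to the `toVertex()` of the types it contains, so converting a `google_ai.GenerateContentResponse` also converts its candidates, their content and any prompt feedback in one call. A test-style sketch, assuming a package-internal import (the conversion extensions are not exported) and using the positional `google_ai` constructors that appear in the code removed above:

```dart
// Test-style sketch; file placement and the src/ import are assumptions.
import 'package:firebase_vertexai/src/vertex_api.dart';
import 'package:google_generative_ai/google_generative_ai.dart' as google_ai;

void main() {
  final googleResponse = google_ai.GenerateContentResponse(
    [
      google_ai.Candidate(
        google_ai.Content('model', [google_ai.TextPart('Hello!')]),
        null, // safetyRatings
        null, // citationMetadata
        google_ai.FinishReason.stop,
        null, // finishMessage
      ),
    ],
    null, // promptFeedback
  );

  // One call converts the response, its candidates, their content and the
  // (absent) prompt feedback.
  final vertexResponse = googleResponse.toVertex();
  print(vertexResponse.text); // Hello!
  print(vertexResponse.candidates.single.finishReason); // stop (vertex enum)
}
```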
- // ignore: unused_element - google_ai.EmbedContentRequest _toGoogleAIEmbedContentRequest() => - google_ai.EmbedContentRequest(content._toGoogleAIContent(), - taskType: taskType?._toGoogleAITaskType(), - title: title, - model: model); + google_ai.EmbedContentRequest toGoogleAI() => + google_ai.EmbedContentRequest(content.toGoogleAI(), + taskType: taskType?.toGoogleAI(), title: title, model: model); } /// An embedding, as defined by a list of values. final class ContentEmbedding { /// Constructor ContentEmbedding(this.values); - factory ContentEmbedding._fromGoogleAIContentEmbedding( - google_ai.ContentEmbedding contentEmbedding) => - ContentEmbedding(contentEmbedding.values); /// The embedding values. final List values; +} - /// Converts this embedding to a [google_ai.ContentEmbedding]. - google_ai.ContentEmbedding _toGoogleAIContentEmbedding() => - google_ai.ContentEmbedding(values); +/// Conversion utilities for [google_ai.ContentEmbedding]. +extension GoogleAIContentEmbeddingConversion on google_ai.ContentEmbedding { + /// Returns this embedding as a [ContentEmbedding]. + ContentEmbedding toVertex() => ContentEmbedding(values); } /// Feedback metadata of a prompt specified in a [GenerativeModel] request. final class PromptFeedback { /// Constructor PromptFeedback(this.blockReason, this.blockReasonMessage, this.safetyRatings); - factory PromptFeedback._fromGoogleAIPromptFeedback( - google_ai.PromptFeedback promptFeedback) => - PromptFeedback( - promptFeedback.blockReason != null - ? BlockReason._fromGoogleAIBlockReason(promptFeedback.blockReason!) - : null, - promptFeedback.blockReasonMessage, - promptFeedback.safetyRatings - .map(SafetyRating._fromGoogleAISafetyRating) - .toList(), - ); /// If set, the prompt was blocked and no candidates are returned. /// @@ -245,15 +227,15 @@ final class PromptFeedback { /// /// There is at most one rating per category. final List safetyRatings; +} - /// Converts this feedback to a [google_ai.PromptFeedback]. - google_ai.PromptFeedback _toGoogleAIPromptFeedback() => - google_ai.PromptFeedback( - blockReason?._toGoogleAIBlockReason(), +/// Conversion utilities for [google_ai.PromptFeedback]. +extension GoogleAIPromptFeedback on google_ai.PromptFeedback { + /// Returns this feedback a [PromptFeedback]. + PromptFeedback toVertex() => PromptFeedback( + blockReason?.toVertex(), blockReasonMessage, - safetyRatings - .map((safetyRating) => safetyRating._toGoogleAISafetyRating()) - .toList(), + safetyRatings.map((r) => r.toVertex()).toList(), ); } @@ -263,20 +245,6 @@ final class Candidate { /// Constructor Candidate(this.content, this.safetyRatings, this.citationMetadata, this.finishReason, this.finishMessage); - factory Candidate._fromGoogleAICandidate(google_ai.Candidate candidate) => - Candidate( - Content._fromGoogleAIContent(candidate.content), - candidate.safetyRatings - ?.map(SafetyRating._fromGoogleAISafetyRating) - .toList(), - candidate.citationMetadata != null - ? CitationMetadata._fromGoogleAICitationMetadata( - candidate.citationMetadata!) - : null, - candidate.finishReason != null - ? FinishReason._fromGoogleAIFinishReason(candidate.finishReason!) - : null, - candidate.finishMessage); /// Generated content returned from the model. final Content content; @@ -300,12 +268,16 @@ final class Candidate { /// Message for finish reason. final String? 
finishMessage; +} - google_ai.Candidate _toGoogleAICandidate() => google_ai.Candidate( - content._toGoogleAIContent(), - safetyRatings?.map((s) => s._toGoogleAISafetyRating()).toList(), - citationMetadata?._toGoogleAICitationMetadata(), - finishReason?._toGoogleAIFinishReason(), +/// Conversion utilities for [google_ai.Candidate]. +extension GooglAICandidateConversion on google_ai.Candidate { + /// Returns this candidate as a [Candidate]. + Candidate toVertex() => Candidate( + content.toVertex(), + safetyRatings?.map((r) => r.toVertex()).toList(), + citationMetadata?.toVertex(), + finishReason?.toVertex(), finishMessage, ); } @@ -319,23 +291,19 @@ final class Candidate { final class SafetyRating { /// Constructor SafetyRating(this.category, this.probability); - factory SafetyRating._fromGoogleAISafetyRating( - google_ai.SafetyRating safetyRating) => - SafetyRating( - HarmCategory._fromGoogleAIHarmCategory(safetyRating.category), - HarmProbability._fromGoogleAIHarmProbability( - safetyRating.probability)); /// The category for this rating. final HarmCategory category; /// The probability of harm for this content. final HarmProbability probability; +} - /// Converts this rating to a [google_ai.SafetyRating]. - google_ai.SafetyRating _toGoogleAISafetyRating() => google_ai.SafetyRating( - category._toGoogleAIHarmCategory(), - probability._toGoogleAIHarmProbability()); +/// Conversion utilities for [google_ai.SafetyRating]. +extension GoogleAISafetyRatingConversion on google_ai.SafetyRating { + /// Returns this safety rating as a [SafetyRating]. + SafetyRating toVertex() => + SafetyRating(category.toVertex(), probability.toVertex()); } /// The reason why a prompt was blocked. @@ -364,25 +332,6 @@ enum BlockReason { }; } - static BlockReason _fromGoogleAIBlockReason( - google_ai.BlockReason googleAIBlockReason) { - return switch (googleAIBlockReason) { - google_ai.BlockReason.unspecified => BlockReason.unspecified, - google_ai.BlockReason.safety => BlockReason.safety, - google_ai.BlockReason.other => BlockReason.other, - }; - } - - /// Converts this blocking reason to a [google_ai.BlockReason]. - - google_ai.BlockReason _toGoogleAIBlockReason() { - return switch (this) { - BlockReason.unspecified => google_ai.BlockReason.unspecified, - BlockReason.safety => google_ai.BlockReason.safety, - BlockReason.other => google_ai.BlockReason.other - }; - } - final String _jsonString; /// Convert to json format @@ -392,6 +341,16 @@ enum BlockReason { String toString() => name; } +/// Conversion utilities for [google_ai.BlockReason]. +extension GoogleAIBlockReasonConversion on google_ai.BlockReason { + /// Returns this block reason as a [BlockReason]. + BlockReason toVertex() => switch (this) { + google_ai.BlockReason.unspecified => BlockReason.unspecified, + google_ai.BlockReason.safety => BlockReason.safety, + google_ai.BlockReason.other => BlockReason.other, + }; +} + /// The category of a rating. 
/// /// These categories cover various kinds of harms that developers may wish to @@ -415,16 +374,6 @@ enum HarmCategory { dangerousContent('HARM_CATEGORY_DANGEROUS_CONTENT'); const HarmCategory(this._jsonString); - factory HarmCategory._fromGoogleAIHarmCategory( - google_ai.HarmCategory harmCategory) { - return switch (harmCategory) { - google_ai.HarmCategory.unspecified => HarmCategory.unspecified, - google_ai.HarmCategory.harassment => HarmCategory.harassment, - google_ai.HarmCategory.hateSpeech => HarmCategory.hateSpeech, - google_ai.HarmCategory.sexuallyExplicit => HarmCategory.sexuallyExplicit, - google_ai.HarmCategory.dangerousContent => HarmCategory.dangerousContent, - }; - } static HarmCategory _parseValue(Object jsonObject) { return switch (jsonObject) { 'HARM_CATEGORY_UNSPECIFIED' => HarmCategory.unspecified, @@ -443,10 +392,26 @@ enum HarmCategory { /// Convert to json format. String toJson() => _jsonString; +} - /// Converts this harm category to a [google_ai.HarmCategory]. +/// Conversion utilities for [google_ai.HarmCategory]. +extension GoogleAIHarmCategoryConversion on google_ai.HarmCategory { + /// Returns this harm category as a [HarmCategory]. + HarmCategory toVertex() => switch (this) { + google_ai.HarmCategory.unspecified => HarmCategory.unspecified, + google_ai.HarmCategory.harassment => HarmCategory.harassment, + google_ai.HarmCategory.hateSpeech => HarmCategory.hateSpeech, + google_ai.HarmCategory.sexuallyExplicit => + HarmCategory.sexuallyExplicit, + google_ai.HarmCategory.dangerousContent => + HarmCategory.dangerousContent, + }; +} - google_ai.HarmCategory _toGoogleAIHarmCategory() { +/// Conversion utilities for [HarmCategory]. +extension HarmCategoryConversion on HarmCategory { + /// Returns this harm category as a [google_ai.HarmCategory]. + google_ai.HarmCategory toGoogleAI() { return switch (this) { HarmCategory.unspecified => google_ai.HarmCategory.unspecified, HarmCategory.harassment => google_ai.HarmCategory.harassment, @@ -479,16 +444,6 @@ enum HarmProbability { const HarmProbability(this._jsonString); - factory HarmProbability._fromGoogleAIHarmProbability( - google_ai.HarmProbability harmProbability) { - return switch (harmProbability) { - google_ai.HarmProbability.unspecified => HarmProbability.unspecified, - google_ai.HarmProbability.negligible => HarmProbability.negligible, - google_ai.HarmProbability.low => HarmProbability.low, - google_ai.HarmProbability.medium => HarmProbability.medium, - google_ai.HarmProbability.high => HarmProbability.high, - }; - } static HarmProbability _parseValue(Object jsonObject) { return switch (jsonObject) { 'UNSPECIFIED' => HarmProbability.unspecified, @@ -501,16 +456,6 @@ enum HarmProbability { }; } - google_ai.HarmProbability _toGoogleAIHarmProbability() { - return switch (this) { - HarmProbability.unspecified => google_ai.HarmProbability.unspecified, - HarmProbability.negligible => google_ai.HarmProbability.negligible, - HarmProbability.low => google_ai.HarmProbability.low, - HarmProbability.medium => google_ai.HarmProbability.medium, - HarmProbability.high => google_ai.HarmProbability.high, - }; - } - final String _jsonString; /// Convert to json format. @@ -520,32 +465,38 @@ enum HarmProbability { String toString() => name; } +/// Conversion utilities for [google_ai.HarmProbability]. +extension GoogleAIHarmProbabilityConverison on google_ai.HarmProbability { + /// Returns this harm probability as a [HarmProbability]. 
+ HarmProbability toVertex() => switch (this) { + google_ai.HarmProbability.unspecified => HarmProbability.unspecified, + google_ai.HarmProbability.negligible => HarmProbability.negligible, + google_ai.HarmProbability.low => HarmProbability.low, + google_ai.HarmProbability.medium => HarmProbability.medium, + google_ai.HarmProbability.high => HarmProbability.high, + }; +} + /// Source attributions for a piece of content. final class CitationMetadata { /// Constructor CitationMetadata(this.citationSources); - factory CitationMetadata._fromGoogleAICitationMetadata( - google_ai.CitationMetadata citationMetadata) => - CitationMetadata(citationMetadata.citationSources - .map(CitationSource._fromGoogleAICitationSource) - .toList()); /// Citations to sources for a specific response. final List citationSources; +} - google_ai.CitationMetadata _toGoogleAICitationMetadata() => - google_ai.CitationMetadata( - citationSources.map((e) => e._toGoogleAICitationSource()).toList()); +/// Conversion utilities for [google_ai.CitationMetadata]. +extension GoogleAICitationMetadataConversion on google_ai.CitationMetadata { + /// Returns this citation metadata as a [CitationMetadata]. + CitationMetadata toVertex() => + CitationMetadata(citationSources.map((s) => s.toVertex()).toList()); } /// Citation to a source for a portion of a specific response. final class CitationSource { /// Constructor CitationSource(this.startIndex, this.endIndex, this.uri, this.license); - factory CitationSource._fromGoogleAICitationSource( - google_ai.CitationSource source) => - CitationSource( - source.startIndex, source.endIndex, source.uri, source.license); /// Start of segment of the response that is attributed to this source. /// @@ -562,9 +513,13 @@ final class CitationSource { /// /// License info is required for code citations. final String? license; +} - google_ai.CitationSource _toGoogleAICitationSource() => - google_ai.CitationSource(startIndex, endIndex, uri, license); +/// Conversion utilities for [google_ai.CitationSource]. +extension GoogleAICitationSourceConversion on google_ai.CitationSource { + /// Returns this citation source as a [CitationSource]. + CitationSource toVertex() => + CitationSource(startIndex, endIndex, uri, license); } /// Reason why a model stopped generating tokens. @@ -591,29 +546,6 @@ enum FinishReason { const FinishReason(this._jsonString); - factory FinishReason._fromGoogleAIFinishReason( - google_ai.FinishReason finishReason) { - return switch (finishReason) { - google_ai.FinishReason.unspecified => FinishReason.unspecified, - google_ai.FinishReason.stop => FinishReason.stop, - google_ai.FinishReason.maxTokens => FinishReason.maxTokens, - google_ai.FinishReason.safety => FinishReason.safety, - google_ai.FinishReason.recitation => FinishReason.recitation, - google_ai.FinishReason.other => FinishReason.other, - }; - } - - google_ai.FinishReason _toGoogleAIFinishReason() { - return switch (this) { - FinishReason.unspecified => google_ai.FinishReason.unspecified, - FinishReason.stop => google_ai.FinishReason.stop, - FinishReason.maxTokens => google_ai.FinishReason.maxTokens, - FinishReason.safety => google_ai.FinishReason.safety, - FinishReason.recitation => google_ai.FinishReason.recitation, - FinishReason.other => google_ai.FinishReason.other, - }; - } - final String _jsonString; /// Convert to json format @@ -635,6 +567,19 @@ enum FinishReason { String toString() => name; } +/// Conversion utilities for [google_ai.FinishReason]. 
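`HarmCategory` is converted in both directions (`toVertex()` above for incoming ratings, `toGoogleAI()` for outgoing safety settings), and both sides are exhaustive switch expressions. A hypothetical test sketch of the resulting round-trip guarantee, again relying on a package-internal import:

```dart
// Hypothetical test sketch; the conversion extensions are package-internal,
// so it imports lib/src/vertex_api.dart directly.
import 'package:firebase_vertexai/src/vertex_api.dart';

void main() {
  for (final category in HarmCategory.values) {
    // toGoogleAI() and toVertex() are exhaustive switch expressions, so adding
    // a value to either enum becomes a compile-time error in the extensions
    // rather than a silent mismatch here.
    final roundTripped = category.toGoogleAI().toVertex();
    if (roundTripped != category) {
      throw StateError('HarmCategory round-trip failed for $category');
    }
  }
  print('All ${HarmCategory.values.length} HarmCategory values round-trip.');
}
```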
+extension GoogleAIFinishReasonConversion on google_ai.FinishReason { + /// Returns this finish reason as a [FinishReason]. + FinishReason toVertex() => switch (this) { + google_ai.FinishReason.unspecified => FinishReason.unspecified, + google_ai.FinishReason.stop => FinishReason.stop, + google_ai.FinishReason.maxTokens => FinishReason.maxTokens, + google_ai.FinishReason.safety => FinishReason.safety, + google_ai.FinishReason.recitation => FinishReason.recitation, + google_ai.FinishReason.other => FinishReason.other, + }; +} + /// Safety setting, affecting the safety-blocking behavior. /// /// Passing a safety setting for a category changes the allowed probability that @@ -646,7 +591,7 @@ final class SafetySetting { factory SafetySetting._fromGoogleAISafetySetting( google_ai.SafetySetting setting) => SafetySetting( - HarmCategory._fromGoogleAIHarmCategory(setting.category), + setting.category.toVertex(), HarmBlockThreshold._fromGoogleAIHarmBlockThreshold( setting.threshold)); @@ -656,15 +601,18 @@ final class SafetySetting { /// Controls the probability threshold at which harm is blocked. final HarmBlockThreshold threshold; - google_ai.SafetySetting _toGoogleAISafetySetting() => google_ai.SafetySetting( - category._toGoogleAIHarmCategory(), - threshold._toGoogleAIHarmBlockThreshold()); - /// Convert to json format. Object toJson() => {'category': category.toJson(), 'threshold': threshold.toJson()}; } +/// Conversion utilities for [SafetySetting]. +extension SafetySettingConversion on SafetySetting { + /// Returns this safety setting as a [google_ai.SafetySetting]. + google_ai.SafetySetting toGoogleAI() => + google_ai.SafetySetting(category.toGoogleAI(), threshold.toGoogleAI()); +} + /// Probability of harm which causes content to be blocked. /// /// When provided in [SafetySetting.threshold], a predicted harm probability at @@ -712,7 +660,17 @@ enum HarmBlockThreshold { final String _jsonString; - google_ai.HarmBlockThreshold _toGoogleAIHarmBlockThreshold() { + @override + String toString() => name; + + /// Convert to json format. + Object toJson() => _jsonString; +} + +/// Conversion utilities for [HarmBlockThreshold]. +extension HarmBlockThresholdConversion on HarmBlockThreshold { + /// Returns this block threshold as a [toGoogleAI()]. + google_ai.HarmBlockThreshold toGoogleAI() { return switch (this) { HarmBlockThreshold.unspecified => google_ai.HarmBlockThreshold.unspecified, @@ -722,12 +680,19 @@ enum HarmBlockThreshold { HarmBlockThreshold.none => google_ai.HarmBlockThreshold.none, }; } +} - @override - String toString() => name; - - /// Convert to json format. - Object toJson() => _jsonString; +/// Conversion utilities for [google_ai.HarmBlockThreshold]. +extension GoogleAIHarmBlockThresholdConversion on google_ai.HarmBlockThreshold { + /// Returns this harm block threshold as a [HarmBlockThreshold]. + HarmBlockThreshold toVertex() => switch (this) { + google_ai.HarmBlockThreshold.unspecified => + HarmBlockThreshold.unspecified, + google_ai.HarmBlockThreshold.low => HarmBlockThreshold.low, + google_ai.HarmBlockThreshold.medium => HarmBlockThreshold.medium, + google_ai.HarmBlockThreshold.high => HarmBlockThreshold.high, + google_ai.HarmBlockThreshold.none => HarmBlockThreshold.none, + }; } /// Configuration options for model generation and outputs. @@ -819,9 +784,12 @@ final class GenerationConfig { if (responseMimeType case final responseMimeType?) 
'responseMimeType': responseMimeType, }; +} - google_ai.GenerationConfig _toGoogleAIGenerationConfig() => - google_ai.GenerationConfig( +/// Conversion utilities for [GenerationConfig]. +extension GenerationConfigConversion on GenerationConfig { + /// Returns this generation config as a [google_ai.GenerationConfig]. + google_ai.GenerationConfig toGoogleAI() => google_ai.GenerationConfig( candidateCount: candidateCount, stopSequences: stopSequences, maxOutputTokens: maxOutputTokens, @@ -882,16 +850,19 @@ enum TaskType { /// Convert to json format Object toJson() => _jsonString; - google_ai.TaskType _toGoogleAITaskType() { - return switch (this) { - TaskType.unspecified => google_ai.TaskType.unspecified, - TaskType.retrievalQuery => google_ai.TaskType.retrievalQuery, - TaskType.retrievalDocument => google_ai.TaskType.retrievalDocument, - TaskType.semanticSimilarity => google_ai.TaskType.semanticSimilarity, - TaskType.classification => google_ai.TaskType.classification, - TaskType.clustering => google_ai.TaskType.clustering, - }; - } +} + +/// Conversion utilities for [TaskType]. +extension TaskTypeConversion on TaskType { + /// Returns this task type as a [google_ai.TaskType]. + google_ai.TaskType toGoogleAI() => switch (this) { + TaskType.unspecified => google_ai.TaskType.unspecified, + TaskType.retrievalQuery => google_ai.TaskType.retrievalQuery, + TaskType.retrievalDocument => google_ai.TaskType.retrievalDocument, + TaskType.semanticSimilarity => google_ai.TaskType.semanticSimilarity, + TaskType.classification => google_ai.TaskType.classification, + TaskType.clustering => google_ai.TaskType.clustering, + }; } /// Parse to [GenerateContentResponse] from json object. diff --git a/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_chat.dart b/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_chat.dart index af2cc2a14212..e7257dc00eb3 100644 --- a/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_chat.dart +++ b/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_chat.dart @@ -12,7 +12,13 @@ // // See the License for the specific language governing permissions and // // limitations under the License. -part of firebase_vertexai; +import 'dart:async'; + +import 'package:google_generative_ai/google_generative_ai.dart' as google_ai; + +import 'vertex_api.dart'; +import 'vertex_content.dart'; +import 'vertex_model.dart'; /// A back-and-forth chat with a generative model. /// @@ -25,15 +31,14 @@ final class ChatSession { ChatSession._(this._history, List? _safetySettings, GenerationConfig? _generationConfig, GenerativeModel _model) - : _googleAIChatSession = _model._googleAIModel.startChat( - history: _history.map((e) => e._toGoogleAIContent()).toList(), + : _googleAIChatSession = _model.googleAIModel.startChat( + history: _history.map((e) => e.toGoogleAI()).toList(), safetySettings: _safetySettings != null ? _safetySettings - .map((setting) => setting._toGoogleAISafetySetting()) + .map((setting) => setting.toGoogleAI()) .toList() : [], - generationConfig: - GenerativeModel._googleAIGenerationConfig(_generationConfig)); + generationConfig: _generationConfig?.toGoogleAI()); final List _history; final google_ai.ChatSession _googleAIChatSession; @@ -61,8 +66,8 @@ final class ChatSession { /// be reflected in the history sent for this message. 
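The `ChatSession` above now reaches the underlying `google_ai.ChatSession` through the public `googleAIModel` getter and the `toGoogleAI()` conversions, but the chat API seen by apps is unchanged. A hedged consumer sketch; `startChat` comes from the exported `StartChatExtension`, and `Content.text` is assumed to be available as in the released package:

```dart
// Consumer-side sketch of the chat flow the wrapper above delegates to the
// underlying google_ai.ChatSession.
import 'package:firebase_vertexai/firebase_vertexai.dart';

Future<void> haveAChat(GenerativeModel model) async {
  final chat = model.startChat();

  final first = await chat.sendMessage(Content.text('Hi, who are you?'));
  print(first.text);

  // The wrapped google_ai.ChatSession keeps the running history, so this
  // second turn sees the exchange above.
  final second = await chat.sendMessage(Content.text('Summarize that.'));
  print(second.text);
}
```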
Future sendMessage(Content message) async { return _googleAIChatSession - .sendMessage(message._toGoogleAIContent()) - .then(GenerateContentResponse._fromGoogleAIGenerateContentResponse); + .sendMessage(message.toGoogleAI()) + .then((r) => r.toVertex()); } /// Continues the chat with a new [message]. @@ -84,8 +89,8 @@ final class ChatSession { /// and response and allowing pending messages to be sent. Stream sendMessageStream(Content message) { return _googleAIChatSession - .sendMessageStream(message._toGoogleAIContent()) - .map(GenerateContentResponse._fromGoogleAIGenerateContentResponse); + .sendMessageStream(message.toGoogleAI()) + .map((r) => r.toVertex()); } } diff --git a/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_content.dart b/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_content.dart index 9990b0b9b8c2..ddf21929d90a 100644 --- a/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_content.dart +++ b/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_content.dart @@ -12,14 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -part of firebase_vertexai; +import 'dart:convert'; +import 'dart:typed_data'; + +import 'package:google_generative_ai/google_generative_ai.dart' as google_ai; /// The base structured datatype containing multi-part content of a message. final class Content { /// Constructor Content(this.role, this.parts); - factory Content._fromGoogleAIContent(google_ai.Content content) => - Content(content.role, content.parts.map(Part._fromGoogleAIPart).toList()); /// The producer of the content. /// @@ -59,10 +60,22 @@ final class Content { if (role case final role?) 'role': role, 'parts': parts.map((p) => p.toJson()).toList() }; - google_ai.Content _toGoogleAIContent() => +} + +/// Conversion utilities for [Content]. +extension ContentConversion on Content { + /// Returns this content as a [google_ai.Content]. + google_ai.Content toGoogleAI() => google_ai.Content(role, parts.map((p) => p.toPart()).toList()); } +/// Conversion utilities for [google_ai.Content]. +extension GoogleAIContentConversion on google_ai.Content { + /// Returns this content as a [Content]. + Content toVertex() => + Content(role, parts.map(Part._fromGoogleAIPart).toList()); +} + /// Parse the [Content] from json object. Content parseContent(Object jsonObject) { return switch (jsonObject) { diff --git a/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_function_calling.dart b/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_function_calling.dart index e6ccb372ef91..31deb8293c72 100644 --- a/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_function_calling.dart +++ b/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_function_calling.dart @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -part of firebase_vertexai; +import 'package:google_generative_ai/google_generative_ai.dart' as google_ai; /// Tool details that the model may use to generate a response. /// @@ -39,8 +39,12 @@ final class Tool { 'functionDeclarations': functionDeclarations.map((f) => f.toJson()).toList(), }; +} - google_ai.Tool _toGoogleAITool() => google_ai.Tool( +/// Conversion utilities for [Tool]. +extension ToolConversion on Tool { + /// Returns this tool as a [google_ai.Tool]. 
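`GoogleAIContentConversion.toVertex()` above still calls the private `Part._fromGoogleAIPart`. That works because the extension is declared in the same library (`vertex_content.dart`) as `Part`; the sketch below illustrates that Dart rule with hypothetical types:

```dart
// Hypothetical single-file sketch of the language rule relied on above: an
// extension declared in the same library as a class can use that class's
// private members.
class Wrapped {
  Wrapped._(this.value); // private constructor
  final String value;
}

extension StringWrapping on String {
  // Allowed: this extension and Wrapped live in the same library (this file).
  Wrapped wrap() => Wrapped._(this);
}

void main() {
  print('hello'.wrap().value); // hello
}
```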
+ google_ai.Tool toGoogleAI() => google_ai.Tool( functionDeclarations: functionDeclarations ?.map((f) => f._toGoogleAIToolFunctionDeclaration()) .toList(), @@ -97,7 +101,12 @@ final class ToolConfig { if (functionCallingConfig case final config?) 'functionCallingConfig': config.toJson(), }; - google_ai.ToolConfig _toGoogleAIToolConfig() => google_ai.ToolConfig( +} + +/// Conversion utilities for [ToolConfig]. +extension ToolConfigConversion on ToolConfig { + /// Returns this tool config as a [google_ai.ToolConfig]. + google_ai.ToolConfig toGoogleAI() => google_ai.ToolConfig( functionCallingConfig: functionCallingConfig?._toGoogleAIFunctionCallingConfig(), ); diff --git a/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_model.dart b/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_model.dart index 7280b2ea2a01..5bf5812a57e5 100644 --- a/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_model.dart +++ b/packages/firebase_vertexai/firebase_vertexai/lib/src/vertex_model.dart @@ -14,7 +14,19 @@ // ignore_for_file: use_late_for_private_fields_and_variables -part of firebase_vertexai; +import 'dart:async'; + +import 'package:firebase_app_check/firebase_app_check.dart'; +import 'package:firebase_auth/firebase_auth.dart'; +import 'package:firebase_core/firebase_core.dart'; +import 'package:google_generative_ai/google_generative_ai.dart' as google_ai; +// ignore: implementation_imports, tightly coupled packages +import 'package:google_generative_ai/src/vertex_hooks.dart'; + +import 'vertex_api.dart'; +import 'vertex_content.dart'; +import 'vertex_function_calling.dart'; +import 'vertex_version.dart'; const _baseUrl = 'firebaseml.googleapis.com'; const _apiVersion = 'v2beta'; @@ -54,16 +66,14 @@ final class GenerativeModel { baseUri: _vertexUri(app, location), requestHeaders: _firebaseTokens(appCheck, auth), safetySettings: safetySettings != null - ? safetySettings - .map((setting) => setting._toGoogleAISafetySetting()) - .toList() + ? safetySettings.map((setting) => setting.toGoogleAI()).toList() : [], - generationConfig: generationConfig?._toGoogleAIGenerationConfig(), - systemInstruction: systemInstruction?._toGoogleAIContent(), + generationConfig: generationConfig?.toGoogleAI(), + systemInstruction: systemInstruction?.toGoogleAI(), tools: tools != null - ? tools.map((tool) => tool._toGoogleAITool()).toList() + ? tools.map((tool) => tool.toGoogleAI()).toList() : [], - toolConfig: toolConfig?._toGoogleAIToolConfig(), + toolConfig: toolConfig?.toGoogleAI(), ); final FirebaseApp _firebaseApp; final google_ai.GenerativeModel _googleAIModel; @@ -87,7 +97,7 @@ final class GenerativeModel { if (config == null) { return google_ai.GenerationConfig(); } else { - return config._toGoogleAIGenerationConfig(); + return config.toGoogleAI(); } } @@ -113,11 +123,6 @@ final class GenerativeModel { }; } - static google_ai.GenerationConfig? _googleAIGenerationConfig( - GenerationConfig? config) { - return config?._toGoogleAIGenerationConfig(); - } - /// Generates content responding to [prompt]. /// /// Sends a "generateContent" API request for the configured model, @@ -132,18 +137,15 @@ final class GenerativeModel { {List? safetySettings, GenerationConfig? generationConfig}) async { Iterable googlePrompt = - prompt.map((content) => content._toGoogleAIContent()); + prompt.map((content) => content.toGoogleAI()); List googleSafetySettings = safetySettings != null - ? safetySettings - .map((setting) => setting._toGoogleAISafetySetting()) - .toList() + ? 
safetySettings.map((setting) => setting.toGoogleAI()).toList() : []; - return _googleAIModel - .generateContent(googlePrompt, - safetySettings: googleSafetySettings, - generationConfig: - _convertGenerationConfig(generationConfig, _firebaseApp)) - .then(GenerateContentResponse._fromGoogleAIGenerateContentResponse); + final response = await _googleAIModel.generateContent(googlePrompt, + safetySettings: googleSafetySettings, + generationConfig: + _convertGenerationConfig(generationConfig, _firebaseApp)); + return response.toVertex(); } /// Generates a stream of content responding to [prompt]. @@ -163,15 +165,12 @@ final class GenerativeModel { {List? safetySettings, GenerationConfig? generationConfig}) { return _googleAIModel - .generateContentStream( - prompt.map((content) => content._toGoogleAIContent()), + .generateContentStream(prompt.map((content) => content.toGoogleAI()), safetySettings: safetySettings != null - ? safetySettings - .map((setting) => setting._toGoogleAISafetySetting()) - .toList() + ? safetySettings.map((setting) => setting.toGoogleAI()).toList() : [], - generationConfig: generationConfig?._toGoogleAIGenerationConfig()) - .map(GenerateContentResponse._fromGoogleAIGenerateContentResponse); + generationConfig: generationConfig?.toGoogleAI()) + .map((r) => r.toVertex()); } /// Counts the total number of tokens in [contents]. @@ -193,8 +192,8 @@ final class GenerativeModel { /// ``` Future countTokens(Iterable contents) async { return _googleAIModel - .countTokens(contents.map((e) => e._toGoogleAIContent())) - .then(CountTokensResponse._fromGoogleAICountTokensResponse); + .countTokens(contents.map((e) => e.toGoogleAI())) + .then((r) => r.toVertex()); } /// Creates an embedding (list of float values) representing [content]. @@ -210,9 +209,9 @@ final class GenerativeModel { Future embedContent(Content content, {TaskType? taskType, String? title}) async { return _googleAIModel - .embedContent(content._toGoogleAIContent(), - taskType: taskType?._toGoogleAITaskType(), title: title) - .then(EmbedContentResponse._fromGoogleAIEmbedContentResponse); + .embedContent(content.toGoogleAI(), + taskType: taskType?.toGoogleAI(), title: title) + .then((r) => r.toVertex()); } /// Creates embeddings (list of float values) representing each content in @@ -232,9 +231,39 @@ final class GenerativeModel { Future batchEmbedContents( Iterable requests) async { return _googleAIModel - .batchEmbedContents( - requests.map((e) => e._toGoogleAIEmbedContentRequest())) - .then( - BatchEmbedContentsResponse._fromGoogleAIBatchEmbedContentsResponse); + .batchEmbedContents(requests.map((e) => e.toGoogleAI())) + .then((r) => r.toVertex()); } } + +/// Conversion utilities for [GenerativeModel]. +extension GoogleAIGenerativeModelConversion on GenerativeModel { + /// Return this model as a [google_ai.GenerativeModel]. + google_ai.GenerativeModel get googleAIModel => _googleAIModel; +} + +/// Returns a [GenerativeModel] using it's private constructor. +GenerativeModel createGenerativeModel({ + required FirebaseApp app, + required String location, + required String model, + Content? systemInstruction, + FirebaseAppCheck? appCheck, + FirebaseAuth? auth, + GenerationConfig? generationConfig, + List? safetySettings, + List? tools, + ToolConfig? 
toolConfig, +}) => + GenerativeModel._( + model: model, + app: app, + appCheck: appCheck, + auth: auth, + location: location, + safetySettings: safetySettings, + generationConfig: generationConfig, + systemInstruction: systemInstruction, + tools: tools, + toolConfig: toolConfig, + );
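`createGenerativeModel` replaces the cross-part access to `GenerativeModel._` now that `firebase_vertexai.dart` is a separate library; like the conversion extensions, it is public inside `lib/src/` but absent from the barrel's export lists, so app-level code is unaffected. A hedged end-to-end sketch; the model name is a placeholder, and `FirebaseVertexAI.instance`, `generativeModel` and `Content.text` are assumed to behave as in the released package:

```dart
// End-to-end consumer sketch: everything below routes through
// createGenerativeModel and the new conversion extensions internally, but the
// calling code is identical to the pre-refactor API.
import 'package:firebase_core/firebase_core.dart';
import 'package:firebase_vertexai/firebase_vertexai.dart';

Future<void> demo() async {
  await Firebase.initializeApp();
  final model =
      FirebaseVertexAI.instance.generativeModel(model: 'gemini-1.5-flash');

  final prompt = [Content.text('Write a haiku about Dart extensions.')];

  final tokens = await model.countTokens(prompt);
  print('Prompt uses ${tokens.totalTokens} tokens');

  final response = await model.generateContent(prompt);
  print(response.text);
}
```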