Merged

@@ -98,7 +98,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
@@ -111,7 +111,7 @@
"source": [
"from bigframes.ml.llm import GeminiTextGenerator\n",
"\n",
"model = GeminiTextGenerator(model_name=\"gemini-2.0-flash-001\")"
"model = GeminiTextGenerator(model_name=\"gemini-2.5-flash\")"
]
},
{

@@ -29,7 +29,7 @@
"id": "JAPoU8Sm5E6e"
},
"source": [
"# Use BigQuery DataFrames with Generative AI for code generation",
"# Use BigQuery DataFrames with Generative AI for code generation\n",
"\n",
"<table align=\"left\">\n",
"\n",
@@ -409,7 +409,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": null,
"metadata": {
"id": "sdjeXFwcHfl7"
},
@@ -430,7 +430,7 @@
"source": [
"from bigframes.ml.llm import GeminiTextGenerator\n",
"\n",
"model = GeminiTextGenerator(model_name=\"gemini-2.0-flash-001\")"
"model = GeminiTextGenerator(model_name=\"gemini-2.5-flash\")"
]
},
{

@@ -26,7 +26,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Use BigQuery DataFrames to cluster and characterize complaints",
"# Use BigQuery DataFrames to cluster and characterize complaints\n",
"\n",
"<table align=\"left\">\n",
"\n",
@@ -1593,7 +1593,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": null,
"metadata": {
"id": "mL5P0_3X04dE"
},
@@ -1614,7 +1614,7 @@
"source": [
"from bigframes.ml.llm import GeminiTextGenerator\n",
"\n",
"q_a_model = GeminiTextGenerator(model_name=\"gemini-2.0-flash-001\")"
"q_a_model = GeminiTextGenerator(model_name=\"gemini-2.5-flash\")"
]
},
{

@@ -29,7 +29,7 @@
"id": "EQbZKS7_ooET"
},
"source": [
"# Build a Vector Search application using BigQuery DataFrames (aka BigFrames)",
"# Build a Vector Search application using BigQuery DataFrames (aka BigFrames)\n",
"\n",
"<table align=\"left\">\n",
"\n",
@@ -1451,7 +1451,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
@@ -1487,7 +1487,7 @@
"source": [
"## gemini model\n",
"\n",
"llm_model = bf_llm.GeminiTextGenerator(model_name = \"gemini-2.0-flash-001\") ## replace with other model as needed"
"llm_model = bf_llm.GeminiTextGenerator(model_name = \"gemini-2.5-flash\") ## replace with other model as needed"
]
},
{

@@ -480,7 +480,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": null,
"metadata": {
"id": "UW2fQ2k5Hsic"
},
@@ -570,7 +570,7 @@
],
"source": [
"# Define the model\n",
"model = GeminiTextGenerator(model_name=\"gemini-2.0-flash-001\")\n",
"model = GeminiTextGenerator(model_name=\"gemini-2.5-flash\")\n",
"\n",
"# Invoke LLM with prompt\n",
"response = predict(zero_shot_prompt, temperature = TEMPERATURE)\n",

@@ -21,7 +21,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [
{
@@ -60,7 +60,7 @@
}
],
"source": [
"model = GeminiTextGenerator(model_name=\"gemini-2.0-flash-001\")"
"model = GeminiTextGenerator(model_name=\"gemini-2.5-flash\")"
]
},
{

@@ -1305,13 +1305,13 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# from bigframes.ml.llm import GeminiTextGenerator\n",
"\n",
"# model = GeminiTextGenerator(model_name=\"gemini-2.0-flash-001\")\n",
"# model = GeminiTextGenerator(model_name=\"gemini-2.5-flash\")\n",
"\n",
"# pred = model.predict(df)\n",
"# pred"

@@ -521,7 +521,7 @@
"outputs": [],
"source": [
"flattened[\"Transcription\"] = flattened[\"GCS Blob\"].blob.audio_transcribe(\n",
" model_name=\"gemini-2.0-flash-001\",\n",
" model_name=\"gemini-2.5-flash\",\n",
" verbose=True,\n",
")\n",
"flattened[\"Transcription\"]"

@@ -1292,7 +1292,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": null,
"metadata": {},
"outputs": [
{
@@ -1339,7 +1339,7 @@
"\n",
"transcribed_results = bbq.ai.generate(\n",
" prompt=(prompt_text, audio_runtime),\n",
" endpoint=\"gemini-2.0-flash-001\",\n",
" endpoint=\"gemini-2.5-flash\",\n",
" model_params={\"generationConfig\": {\"temperature\": 0.0}},\n",
")\n",
"\n",

packages/bigframes/tests/system/large/blob/test_function.py (8 changes: 4 additions & 4 deletions)
@@ -754,8 +754,8 @@ def test_blob_pdf_chunk_verbose(pdf_mm_df: bpd.DataFrame, bq_connection: str):
@pytest.mark.parametrize(
"model_name",
[
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
],
)
def test_blob_transcribe(
@@ -805,8 +805,8 @@ def test_blob_transcribe(
@pytest.mark.parametrize(
"model_name",
[
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
],
)
def test_blob_transcribe_verbose(

packages/bigframes/tests/system/large/ml/test_llm.py (18 changes: 4 additions & 14 deletions)
@@ -27,8 +27,6 @@
@pytest.mark.parametrize(
"model_name",
(
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
Comment on lines 30 to 32 (Contributor, severity: medium):

There is an inconsistency in model naming conventions within this file. This test uses unversioned aliases (e.g., gemini-2.5-flash), while other tests (e.g., test_llm_gemini_score at line 169) use specific versioned snapshots (e.g., gemini-2.5-flash-001). It is recommended to use a consistent naming scheme across the test suite to ensure clarity and reproducibility. This applies to the parametrizations in the subsequent tests (Diffs 2-5) as well.

        "gemini-2.5-pro",
        "gemini-2.5-flash",
        "gemini-2.5-flash-lite",
        "gemini-2.5-flash-001",
        "gemini-2.5-flash-lite-001"

@@ -56,8 +54,6 @@ def test_create_load_gemini_text_generator_model(
@pytest.mark.parametrize(
"model_name",
(
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
@@ -79,8 +75,6 @@ def test_gemini_text_generator_predict_default_params_success(
@pytest.mark.parametrize(
"model_name",
(
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
@@ -104,8 +98,6 @@ def test_gemini_text_generator_predict_with_params_success(
@pytest.mark.parametrize(
"model_name",
(
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
@@ -131,8 +123,6 @@ def test_gemini_text_generator_multi_cols_predict_success(
@pytest.mark.parametrize(
"model_name",
(
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
@@ -176,8 +166,8 @@ def test_gemini_text_generator_predict_output_schema_success(
@pytest.mark.parametrize(
"model_name",
(
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
),
)
def test_llm_gemini_score(llm_fine_tune_df_default_index, model_name):
@@ -205,8 +195,8 @@ def test_llm_gemini_score(llm_fine_tune_df_default_index, model_name):
@pytest.mark.parametrize(
"model_name",
(
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
),
)
def test_llm_gemini_pro_score_params(llm_fine_tune_df_default_index, model_name):

@@ -24,8 +24,8 @@
@pytest.mark.parametrize(
"model_name",
(
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
),
)
@pytest.mark.flaky(retries=2)
@@ -65,7 +65,7 @@ def test_multimodal_embedding_generator_predict_default_params_success(

@pytest.mark.parametrize(
"model_name",
("gemini-2.0-flash-001",),
("gemini-2.5-flash",),
)
@pytest.mark.flaky(retries=2)
def test_gemini_text_generator_multimodal_structured_output(

@@ -22,7 +22,7 @@ def gemini_flash_model(session, bq_connection) -> llm.GeminiTextGenerator:
return llm.GeminiTextGenerator(
session=session,
connection_name=bq_connection,
model_name="gemini-2.0-flash-001",
model_name="gemini-2.5-flash",
)



packages/bigframes/tests/system/load/test_llm.py (6 changes: 3 additions & 3 deletions)
@@ -41,8 +41,8 @@ def llm_remote_text_df(session, llm_remote_text_pandas_df):
@pytest.mark.parametrize(
"model_name",
(
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
),
)
def test_llm_gemini_configure_fit(
@@ -79,7 +79,7 @@ def test_llm_gemini_configure_fit(

@pytest.mark.flaky(retries=2)
def test_llm_gemini_w_ground_with_google_search(llm_remote_text_df):
-model = llm.GeminiTextGenerator(model_name="gemini-2.0-flash-001", max_iterations=1)
+model = llm.GeminiTextGenerator(model_name="gemini-2.5-flash", max_iterations=1)
df = model.predict(
llm_remote_text_df["prompt"],
ground_with_google_search=True,