From 1cfd3ae15c77fd63f54889af6feba47d5e67bd66 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Mon, 18 Dec 2023 08:44:29 -0600 Subject: [PATCH 1/9] SN-92 add basic foundry layout --- examples/foundry/foundry_basics.ipynb | 105 ++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 examples/foundry/foundry_basics.ipynb diff --git a/examples/foundry/foundry_basics.ipynb b/examples/foundry/foundry_basics.ipynb new file mode 100644 index 000000000..55f489299 --- /dev/null +++ b/examples/foundry/foundry_basics.ipynb @@ -0,0 +1,105 @@ +{ + "nbformat": 4, + "nbformat_minor": 2, + "metadata": {}, + "cells": [ + { + "metadata": {}, + "source": [ + "\n", + " \n", + "" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Fix Below links" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Foundry overview\n", + "\n", + "This notebook is used to go over the basic of foundry through the Python SDK" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "Foundry incorporates foundational models into your Labelbox workflow. You can use Foundry to:\n", + "\n", + "* Predict (infer) labels from your data\n", + "* Compare the performance of different foundational models with your data and ontologies.\n", + "* Prototype, diagnose, and refine a machine learning app to solve specific business needs.\n", + "\n", + "Foundry creates model runs that predict data row annotations based on your input." + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "!pip install labelbox" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "import labelbox as lb\n", + "import random" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "# API Key and Client\n", + "\n", + "Provide a valid API key below in order to properly connect to the Labelbox Client." 
+ ],
+ "cell_type": "markdown"
+ },
+ {
+ "metadata": {},
+ "source": [
+ "# Add your API key\n",
+ "API_KEY = None\n",
+ "#To get your API key go to: Workspace settings -> API -> Create API Key\n",
+ "client = lb.Client(api_key=API_KEY)"
+ ],
+ "cell_type": "code",
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "metadata": {},
+ "source": [],
+ "cell_type": "markdown"
+ }
+ ]
+}
\ No newline at end of file

From 590ce3ac2b6b15b3ea84d9c5dbfe151be691cc1e Mon Sep 17 00:00:00 2001
From: Gabefire <33893811+Gabefire@users.noreply.github.com>
Date: Mon, 18 Dec 2023 09:30:30 -0600
Subject: [PATCH 2/9] SN-92 added a bit more to foundry notebook

---
 examples/foundry/foundry_basics.ipynb | 177 +++++++++++++++++++++++++-
 1 file changed, 175 insertions(+), 2 deletions(-)

diff --git a/examples/foundry/foundry_basics.ipynb b/examples/foundry/foundry_basics.ipynb
index 55f489299..e64910974 100644
--- a/examples/foundry/foundry_basics.ipynb
+++ b/examples/foundry/foundry_basics.ipynb
@@ -69,7 +69,7 @@
 "metadata": {},
 "source": [
 "import labelbox as lb\n",
- "import random"
+ "import uuid"
 ],
 "cell_type": "code",
 "outputs": [],
@@ -88,7 +88,7 @@
 "metadata": {},
 "source": [
 "# Add your API key\n",
- "API_KEY = None\n",
+ "API_KEY = \"<REDACTED>\"\n",
 "#To get your API key go to: Workspace settings -> API -> Create API Key\n",
 "client = lb.Client(api_key=API_KEY)"
 ],
@@ -96,6 +96,179 @@
 "outputs": [],
 "execution_count": null
 },
+ {
+ "metadata": {},
+ "source": [
+ "# End-to-end example: Run foundry and send to annotate from catalog"
+ ],
+ "cell_type": "markdown"
+ },
+ {
+ "metadata": {},
+ "source": [
+ "## Step 1: Import data rows into catalog"
+ ],
+ "cell_type": "markdown"
+ },
+ {
+ "metadata": {},
+ "source": [
+ "# send a sample image as data row for a dataset\n",
+ "global_key = str(uuid.uuid4())\n",
+ "\n",
+ "test_img_url = {\n",
+ "    \"row_data\":\n",
+ "        \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n",
+ "    \"global_key\":\n",
+ "        global_key\n",
+ "}\n",
+ "\n",
+ "dataset = client.create_dataset(name=\"foundry-demo-dataset\")\n",
+ "task = dataset.create_data_rows([test_img_url])\n",
+ "task.wait_till_done()\n",
+ "\n",
+ "print(f\"Errors: {task.errors}\")\n",
+ "print(f\"Failed data rows: {task.failed_data_rows}\")"
+ ],
+ "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Errors: None\n",
+ "Failed data rows: None\n"
+ ]
+ }
+ ],
+ "execution_count": null
+ },
+ {
+ "metadata": {},
+ "source": [
+ "## Step 2: Create/select an ontology that matches model\n",
+ "\n",
+ "Your project should have correct ontology setup with all the tools and classifications supported for your model and data type.\n",
+ "\n",
+ "For example, when using Amazon Rekognition you would need to create a bounding box annotation for your ontology since it only supports object detection. Likewise when using YOLOv8 you would need to create a classification annotation for your ontology since it only supports image classification. \n",
+ "\n",
+ "In this tutorial, we will use Amazon Rekognition to detect objects in an image dataset. 
In later tutorials we will explore other data types and different annotations." + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Create ontology with two bounding boxes that is included with Amazon Rekognition: Car and Person \n", + "ontology_builder = lb.OntologyBuilder(\n", + " classifications=[],\n", + " tools=[\n", + " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Car\"),\n", + " lb.Tool(tool=lb.Tool.Type.BBOX, name=\"Person\")\n", + " ]\n", + ")\n", + "\n", + "ontology = client.create_ontology(\"Image Bounding Box Annotation Demo Foundry\",\n", + " ontology_builder.asdict(),\n", + " media_type=lb.MediaType.Image)" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 3: Create a labeling project\n", + "\n", + "Connect the ontology to the labeling project" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "project = client.create_project(name=\"Foundry Image Demo\",\n", + " media_type=lb.MediaType.Image)\n", + "\n", + "project.setup_editor(ontology)" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 4: Create foundry application in UI\n", + "TODO: Most show setting up APP in UI" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "AMAZON_REKOGNITION_APP_ID = \"d6713735-88ec-4d40-8915-26e683e0e7e6\"" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 5: Run foundry app on data set\n" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "AMAZON_REKOGNITION_APP_ID = \"d6713735-88ec-4d40-8915-26e683e0e7e6\"\n", + "\n", + "task = client.run_foundry_app(model_run_name=f\"Amazon-{str(uuid.uuid4())}\",\n", + " data_rows=lb.GlobalKeys([global_key]),\n", + " app_id=AMAZON_REKOGNITION_APP_ID)\n", + "\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\") " + ], + "cell_type": "code", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Errors: None\n" + ] + } + ], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "#DELETE THIS LATER JUST TO TEST\n", + "\n", + "client.run_foundry_app\n", + "\n", + "client.send_to_annotate_from_catalog\n", + "\n", + "#below will be done last\n", + "lb.ModelRun.send_to_annotate_from_model" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, { "metadata": {}, "source": [], From 4786fa5916d30e548f2b36f625ce80d09103d9b4 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Mon, 18 Dec 2023 16:02:38 -0600 Subject: [PATCH 3/9] SN-92 finished up foundry notebook --- examples/foundry/foundry_basics.ipynb | 134 +++++++++++++++++--------- 1 file changed, 90 insertions(+), 44 deletions(-) diff --git a/examples/foundry/foundry_basics.ipynb b/examples/foundry/foundry_basics.ipynb index e64910974..13533eab7 100644 --- a/examples/foundry/foundry_basics.ipynb +++ b/examples/foundry/foundry_basics.ipynb @@ -12,23 +12,16 @@ ], "cell_type": "markdown" }, - { - "metadata": {}, - "source": [ - "# Fix Below links" - ], - "cell_type": "markdown" - }, { "metadata": {}, "source": [ "\n", - "\n", "\n", "\n", "\n", - "\n", "" ], @@ -59,7 +52,7 @@ { "metadata": {}, "source": [ - "!pip install labelbox" + "!pip install -q labelbox" ], "cell_type": "code", 
"outputs": [], @@ -69,6 +62,7 @@ "metadata": {}, "source": [ "import labelbox as lb\n", + "from labelbox.schema.conflict_resolution_strategy import ConflictResolutionStrategy\n", "import uuid" ], "cell_type": "code", @@ -88,7 +82,7 @@ "metadata": {}, "source": [ "# Add your API key\n", - "API_KEY = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJjbG9vcmRpaGUwMDkyMDcza2Nvcm5jajdnIiwib3JnYW5pemF0aW9uSWQiOiJjbG9vcmRpZ3cwMDkxMDcza2M2cG9oeWFiIiwiYXBpS2V5SWQiOiJjbHE1OWd6M3MwMDRxMDcweDRwb3BmajV4Iiwic2VjcmV0IjoiOWE5ZWVmNDczNDI2ZDI2ZjUwOTU5ZDY4ZmZiNGJmMWMiLCJpYXQiOjE3MDI1NjIwMjYsImV4cCI6MjMzMzcxNDAyNn0.BsdKnIr8Np4eYxJ_6VILmuY-D6n2gUdvGKGvMHq9Eh4\"\n", + "API_KEY = \"\"\n", "#To get your API key go to: Workspace settings -> API -> Create API Key\n", "client = lb.Client(api_key=API_KEY)" ], @@ -131,16 +125,7 @@ "print(f\"Failed data rows: {task.failed_data_rows}\")" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Errors: None\n", - "Failed data rows: None\n" - ] - } - ], + "outputs": [], "execution_count": null }, { @@ -148,7 +133,7 @@ "source": [ "## Step 2: Create/select an ontology that matches model\n", "\n", - "Your project should have correct ontology setup with all the tools and classifications supported for your model and data type.\n", + "Your project should have the correct ontology setup with all the tools and classifications supported for your model and data type.\n", "\n", "For example, when using Amazon Rekognition you would need to create a bounding box annotation for your ontology since it only supports object detection. Likewise when using YOLOv8 you would need to create a classification annotation for your ontology since it only supports image classification. \n", "\n", @@ -201,14 +186,23 @@ "metadata": {}, "source": [ "## Step 4: Create foundry application in UI\n", - "TODO: Most show setting up APP in UI" + "\n", + "Currently we do not support this workflow through the SDK\n", + "#### Workflow:\n", + "\n", + "1. Navigate to model and select ***Create*** > ***App***\n", + "\n", + "2. Select ***Amazon Rekognition*** and name your foundry application\n", + "\n", + "3. 
Customize your perimeters and then select ***Save & Create***" ], "cell_type": "markdown" }, { "metadata": {}, "source": [ - "AMAZON_REKOGNITION_APP_ID = \"d6713735-88ec-4d40-8915-26e683e0e7e6\"" + "#Select your foundry application inside the UI and copy the APP ID from the top right corner\n", + "AMAZON_REKOGNITION_APP_ID = \"\"" ], "cell_type": "code", "outputs": [], @@ -217,46 +211,54 @@ { "metadata": {}, "source": [ - "## Step 5: Run foundry app on data set\n" + "## Step 5: Run foundry app on data rows\n" ], "cell_type": "markdown" }, { "metadata": {}, "source": [ - "AMAZON_REKOGNITION_APP_ID = \"d6713735-88ec-4d40-8915-26e683e0e7e6\"\n", - "\n", "task = client.run_foundry_app(model_run_name=f\"Amazon-{str(uuid.uuid4())}\",\n", - " data_rows=lb.GlobalKeys([global_key]),\n", + " data_rows=lb.GlobalKeys(\n", + " [global_key] # Provide a list of global keys \n", + " ), \n", " app_id=AMAZON_REKOGNITION_APP_ID)\n", "\n", "task.wait_till_done()\n", "\n", - "print(f\"Errors: {task.errors}\") " + "print(f\"Errors: {task.errors}\") \n", + "\n", + "#Obtain model run ID from task\n", + "MODEL_RUN_ID = task.metadata[\"modelRunId\"]" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Errors: None\n" - ] - } - ], + "outputs": [], "execution_count": null }, { "metadata": {}, "source": [ - "#DELETE THIS LATER JUST TO TEST\n", + "## Step 6: Map ontology through the UI\n", "\n", - "client.run_foundry_app\n", + "Currently we do not support this workflow through the SDK\n", "\n", - "client.send_to_annotate_from_catalog\n", + "#### Workflow\n", "\n", - "#below will be done last\n", - "lb.ModelRun.send_to_annotate_from_model" + "1. Navigate to your dataset you created for your model run\n", + "2. Select **Select all** in the top right corner\n", + "3. Select **Manage selection** > **Send to Annotate**\n", + "4. Specify the project we created from the project dropdown menu\n", + "5. Mark **Include model predictions** then scroll down and select **map**\n", + "6. Select the incoming ontology and matching ontology feature for both Car and Person\n", + "7. Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Copy map ontology through the UI then paste JSON file here\n", + "predictions_ontology_mapping = \"\"" ], "cell_type": "code", "outputs": [], @@ -264,7 +266,51 @@ }, { "metadata": {}, - "source": [], + "source": [ + "## Step 6: Send model generated annotations from catalog to project\n", + "\n", + "### Parameters\n", + "\n", + "When you send predicted data rows to annotate from catalog, you may choose to include or exclude certain parameters, at a minimum a predictions_ontology_mapping will need to be provided:\n", + "\n", + "* predictions_ontology_mapping: A mapping of feature schema ids to feature schema ids\n", + "* exclude_data_rows_in_project: Excludes data rows that are already in the project. \n", + "* override_existing_annotations_rule: The strategy defining how to handle conflicts in classifications between the data rows that already exist in the project and incoming predictions from the source model run or annotations from the source project. 
\n", + " * Defaults to ConflictResolutionStrategy.KeepExisting\n", + " * Options include:\n", + " * ConflictResolutionStrategy.KeepExisting\n", + " * ConflictResolutionStrategy.OverrideWithPredictions\n", + "* param batch_priority: The priority of the batch.\n" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "model_run = client.get_model_run(MODEL_RUN_ID)\n", + "\n", + "send_to_annotations_params = {\n", + " \"predictions_ontology_mapping\": predictions_ontology_mapping,\n", + " \"exclude_data_rows_in_project\": False,\n", + " \"override_existing_annotations_rule\": ConflictResolutionStrategy.OverrideWithPredictions,\n", + " \"batch_priority\": 5,\n", + "}\n", + "\n", + "\n", + "task = model_run.send_to_annotate_from_model(\n", + " destination_project_id=project.uid,\n", + " task_queue_id=None, #ID of workflow task, set ID to None if you want to convert pre-labels to ground truths or obtain task queue id through project.task_queues().\n", + " batch_name=\"Foundry Demo Batch\",\n", + " data_rows=lb.GlobalKeys(\n", + " [global_key] # Provide a list of global keys from foundry app task\n", + " ),\n", + " params=send_to_annotations_params\n", + " )\n", + "\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")" + ], "cell_type": "code", "outputs": [], "execution_count": null From 16260aa80db1db8e4fa5d8e32fd1914dee1d6843 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Fri, 22 Dec 2023 10:22:44 -0600 Subject: [PATCH 4/9] added object detection notebook --- examples/foundry/image_classification.ipynb | 352 ++++++++++++++++++ ...ry_basics.ipynb => object_detection.ipynb} | 61 ++- 2 files changed, 404 insertions(+), 9 deletions(-) create mode 100644 examples/foundry/image_classification.ipynb rename examples/foundry/{foundry_basics.ipynb => object_detection.ipynb} (89%) diff --git a/examples/foundry/image_classification.ipynb b/examples/foundry/image_classification.ipynb new file mode 100644 index 000000000..f818f5c7d --- /dev/null +++ b/examples/foundry/image_classification.ipynb @@ -0,0 +1,352 @@ +{ + "nbformat": 4, + "nbformat_minor": 2, + "metadata": {}, + "cells": [ + { + "metadata": {}, + "source": [ + "\n", + " \n", + "" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Foundry overview\n", + "\n", + "This notebook is used to go over the basic of foundry through the Python SDK" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "Foundry incorporates foundational models into your Labelbox workflow. You can use Foundry to:\n", + "\n", + "* Predict (infer) labels from your data\n", + "* Compare the performance of different foundational models with your data and ontologies.\n", + "* Prototype, diagnose, and refine a machine learning app to solve specific business needs.\n", + "\n", + "Foundry creates model runs that predict data row annotations based on your input." 
+ ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "!pip install -q labelbox" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "import labelbox as lb\n", + "from labelbox.schema.conflict_resolution_strategy import ConflictResolutionStrategy\n", + "import uuid" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "# API Key and Client\n", + "\n", + "Provide a valid API key below in order to properly connect to the Labelbox Client." + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Add your API key\n", + "API_KEY = \"\"\n", + "#To get your API key go to: Workspace settings -> API -> Create API Key\n", + "client = lb.Client(api_key=API_KEY)" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "# End-to-end example: Run foundry and send to annotate from catalog" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "## Step 1: Import data rows into catelog" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# send a sample images as data row for a dataset\n", + "data_rows = []\n", + "global_keys =[]\n", + "\n", + "for i in range(6):\n", + " global_key = str(uuid.uuid4())\n", + " data_rows.append({\n", + " \"row_data\":\n", + " f\"https://storage.googleapis.com/labelbox-datasets/Foundry_Notebooks/Image_Classification/image_{i}.jpeg\",\n", + " \"global_key\":\n", + " global_key\n", + " })\n", + " global_keys.append(global_key)\n", + "\n", + "\n", + "dataset = client.create_dataset(name=\"foundry-demo-dataset\")\n", + "task = dataset.create_data_rows(data_rows)\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")\n", + "print(f\"Failed data rows: {task.failed_data_rows}\")" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 2: Create/select an ontology that matches model\n", + "\n", + "Your project should have the correct ontology setup with all the tools and classifications supported for your model and data type.\n", + "\n", + "For example, when using Amazon Rekognition you would need to create a bounding box annotation for your ontology since it only supports object detection. Likewise when using YOLOv8 you would need to create a classification annotation for your ontology since it only supports image classification. \n", + "\n", + "In this tutorial, we will use YOLOv8 Classification to classify images in an dataset. 
" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Create ontology with two image classification radio options that is included with YOLOv8 CLS: Computer and Phone\n", + "ontology_builder = lb.OntologyBuilder(\n", + " classifications=[\n", + " lb.Classification(\n", + " class_type=lb.Classification.Type.RADIO,\n", + " name=\"Image Classification Foundry Demo\",\n", + " options=([\n", + " lb.Option(value=\"Computer\"),\n", + " lb.Option(value=\"Phone\")\n", + " ]))\n", + " ],\n", + " tools=[]\n", + ")\n", + "\n", + "ontology = client.create_ontology(\"Image Classification Annotation Demo Foundry\",\n", + " ontology_builder.asdict(),\n", + " media_type=lb.MediaType.Image)" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 3: Create a labeling project\n", + "\n", + "Connect the ontology to the labeling project" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "project = client.create_project(name=\"Foundry Image Classification Demo\",\n", + " media_type=lb.MediaType.Image)\n", + "\n", + "project.setup_editor(ontology)" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 4: Create foundry application in UI\n", + "\n", + "Currently we do not support this workflow through the SDK\n", + "#### Workflow:\n", + "\n", + "1. Navigate to model and select ***Create*** > ***App***\n", + "\n", + "2. Select ***YOLOv8 Classification*** and name your foundry application\n", + "\n", + "3. Customize your perimeters and then select ***Save & Create***" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "#Select your foundry application inside the UI and copy the APP ID from the top right corner\n", + "YOLOV8_CLS_APP_ID = \"\"" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 5: Run foundry app on data rows\n" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "task = client.run_foundry_app(model_run_name=f\"YOLOv8-CLS-{str(uuid.uuid4())}\",\n", + " data_rows=lb.GlobalKeys(\n", + " global_keys # Provide a list of global keys \n", + " ), \n", + " app_id=YOLOV8_CLS_APP_ID)\n", + "\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\") \n", + "\n", + "#Obtain model run ID from task\n", + "MODEL_RUN_ID = task.metadata[\"modelRunId\"]" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 6: Map ontology through the UI\n", + "\n", + "Currently we do not support this workflow through the SDK\n", + "\n", + "#### Workflow\n", + "\n", + "1. Navigate to your dataset you created for your model run\n", + "2. Select **Select all** in the top right corner\n", + "3. Select **Manage selection** > **Send to Annotate**\n", + "4. Specify the project we created from the project dropdown menu\n", + "5. Mark **Include model predictions** then scroll down and select **Map**\n", + "6. Select the incoming ontology and matching ontology feature for both Computer and Phone\n", + "7. 
Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Copy map ontology through the UI then paste JSON file here\n", + "PREDICTIONS_ONTOLOGY_MAPPING = {}" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 6: Send model generated annotations from catalog to annotate\n", + "\n", + "### Parameters\n", + "\n", + "When you send predicted data rows to annotate from catalog, you may choose to include or exclude certain parameters, at a minimum a predictions_ontology_mapping will need to be provided:\n", + "\n", + "* predictions_ontology_mapping: A mapping of feature schema ids to feature schema ids\n", + "* exclude_data_rows_in_project: Excludes data rows that are already in the project. \n", + "* override_existing_annotations_rule: The strategy defining how to handle conflicts in classifications between the data rows that already exist in the project and incoming predictions from the source model run or annotations from the source project. \n", + " * Defaults to ConflictResolutionStrategy.KeepExisting\n", + " * Options include:\n", + " * ConflictResolutionStrategy.KeepExisting\n", + " * ConflictResolutionStrategy.OverrideWithPredictions\n", + "* param batch_priority: The priority of the batch.\n" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "model_run = client.get_model_run(MODEL_RUN_ID)\n", + "\n", + "send_to_annotations_params = {\n", + " \"predictions_ontology_mapping\": PREDICTIONS_ONTOLOGY_MAPPING,\n", + " \"exclude_data_rows_in_project\": False,\n", + " \"override_existing_annotations_rule\": ConflictResolutionStrategy.OverrideWithPredictions,\n", + " \"batch_priority\": 5,\n", + "}\n", + "\n", + "\n", + "task = model_run.send_to_annotate_from_model(\n", + " destination_project_id=project.uid,\n", + " task_queue_id=None, #ID of workflow task, set ID to None if you want to convert pre-labels to ground truths or obtain task queue id through project.task_queues().\n", + " batch_name=\"Foundry Demo Batch\",\n", + " data_rows=lb.GlobalKeys(\n", + " global_keys # Provide a list of global keys from foundry app task\n", + " ),\n", + " params=send_to_annotations_params\n", + " )\n", + "\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Clean up" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# project.delete()\n", + "# dataset.delete()\n", + "# model_run.delete()" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [], + "cell_type": "markdown" + } + ] +} \ No newline at end of file diff --git a/examples/foundry/foundry_basics.ipynb b/examples/foundry/object_detection.ipynb similarity index 89% rename from examples/foundry/foundry_basics.ipynb rename to examples/foundry/object_detection.ipynb index 13533eab7..367b8f092 100644 --- a/examples/foundry/foundry_basics.ipynb +++ b/examples/foundry/object_detection.ipynb @@ -16,12 +16,12 @@ "metadata": {}, "source": [ "\n", - "\n", "\n", "\n", "\n", - "\n", "" ], @@ -125,7 +125,16 @@ "print(f\"Failed data rows: {task.failed_data_rows}\")" ], "cell_type": "code", - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Errors: None\n", + "Failed data rows: None\n" + ] 
+ }
+ ],
 "execution_count": null
 },
 {
@@ -137,7 +146,7 @@
 "\n",
 "For example, when using Amazon Rekognition you would need to create a bounding box annotation for your ontology since it only supports object detection. Likewise when using YOLOv8 you would need to create a classification annotation for your ontology since it only supports image classification. \n",
 "\n",
- "In this tutorial, we will use Amazon Rekognition to detect objects in an image dataset. In later tutorials we will explore other data types and different annotations."
+ "In this tutorial, we will use Amazon Rekognition to detect objects in an image dataset. "
 ],
 "cell_type": "markdown"
 },
@@ -232,7 +241,15 @@
 "MODEL_RUN_ID = task.metadata[\"modelRunId\"]"
 ],
 "cell_type": "code",
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Errors: None\n"
+ ]
+ }
+ ],
 "execution_count": null
 },
 {
@@ -248,7 +265,7 @@
 "2. Select **Select all** in the top right corner\n",
 "3. Select **Manage selection** > **Send to Annotate**\n",
 "4. Specify the project we created from the project dropdown menu\n",
- "5. Mark **Include model predictions** then scroll down and select **map**\n",
+ "5. Mark **Include model predictions** then scroll down and select **Map**\n",
 "6. Select the incoming ontology and matching ontology feature for both Car and Person\n",
 "7. Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner"
 ],
@@ -258,7 +275,7 @@
 "metadata": {},
 "source": [
 "# Copy map ontology through the UI then paste JSON file here\n",
- "predictions_ontology_mapping = \"\""
+ "PREDICTIONS_ONTOLOGY_MAPPING = \"\""
 ],
 "cell_type": "code",
 "outputs": [],
@@ -267,7 +284,7 @@
 {
 "metadata": {},
 "source": [
- "## Step 7: Send model generated annotations from catalog to project\n",
+ "## Step 7: Send model generated annotations from catalog to annotate\n",
 "\n",
 "### Parameters\n",
 "\n",
@@ -290,7 +307,7 @@
 "model_run = client.get_model_run(MODEL_RUN_ID)\n",
 "\n",
 "send_to_annotations_params = {\n",
- "    \"predictions_ontology_mapping\": predictions_ontology_mapping,\n",
+ "    \"predictions_ontology_mapping\": PREDICTIONS_ONTOLOGY_MAPPING,\n",
 "    \"exclude_data_rows_in_project\": False,\n",
 "    \"override_existing_annotations_rule\": ConflictResolutionStrategy.OverrideWithPredictions,\n",
 "    \"batch_priority\": 5,\n",
@@ -312,6 +329,32 @@
 "print(f\"Errors: {task.errors}\")"
 ],
 "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Errors: None\n"
+ ]
+ }
+ ],
 "execution_count": null
 },
+ {
+ "metadata": {},
+ "source": [
+ "## Clean up"
+ ],
+ "cell_type": "markdown"
+ },
+ {
+ "metadata": {},
+ "source": [
+ "# project.delete()\n",
+ "# dataset.delete()\n",
+ "# model_run.delete()"
+ ],
+ "cell_type": "code",
 "outputs": [],
 "execution_count": null
 },

From 9e8ea3f8702825e610198eebaaee534145097676 Mon Sep 17 00:00:00 2001
From: Gabefire <33893811+Gabefire@users.noreply.github.com>
Date: Tue, 2 Jan 2024 14:57:40 -0600
Subject: [PATCH 5/9] started text notebook

---
 examples/foundry/image_classification.ipynb |   4 +-
 examples/foundry/text_summarization.ipynb   | 352 ++++++++++++++++++++
 2 files changed, 354 insertions(+), 2 deletions(-)
 create mode 100644 examples/foundry/text_summarization.ipynb

diff --git a/examples/foundry/image_classification.ipynb b/examples/foundry/image_classification.ipynb
index f818f5c7d..8339142d0 100644
--- a/examples/foundry/image_classification.ipynb
+++ b/examples/foundry/image_classification.ipynb
@@ 
-16,12 +16,12 @@ "metadata": {}, "source": [ "\n", - "\n", "\n", "\n", "\n", - "\n", "" ], diff --git a/examples/foundry/text_summarization.ipynb b/examples/foundry/text_summarization.ipynb new file mode 100644 index 000000000..8339142d0 --- /dev/null +++ b/examples/foundry/text_summarization.ipynb @@ -0,0 +1,352 @@ +{ + "nbformat": 4, + "nbformat_minor": 2, + "metadata": {}, + "cells": [ + { + "metadata": {}, + "source": [ + "\n", + " \n", + "" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Foundry overview\n", + "\n", + "This notebook is used to go over the basic of foundry through the Python SDK" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "Foundry incorporates foundational models into your Labelbox workflow. You can use Foundry to:\n", + "\n", + "* Predict (infer) labels from your data\n", + "* Compare the performance of different foundational models with your data and ontologies.\n", + "* Prototype, diagnose, and refine a machine learning app to solve specific business needs.\n", + "\n", + "Foundry creates model runs that predict data row annotations based on your input." + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "!pip install -q labelbox" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "import labelbox as lb\n", + "from labelbox.schema.conflict_resolution_strategy import ConflictResolutionStrategy\n", + "import uuid" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "# API Key and Client\n", + "\n", + "Provide a valid API key below in order to properly connect to the Labelbox Client." 
+ ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Add your API key\n", + "API_KEY = \"\"\n", + "#To get your API key go to: Workspace settings -> API -> Create API Key\n", + "client = lb.Client(api_key=API_KEY)" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "# End-to-end example: Run foundry and send to annotate from catalog" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "## Step 1: Import data rows into catelog" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# send a sample images as data row for a dataset\n", + "data_rows = []\n", + "global_keys =[]\n", + "\n", + "for i in range(6):\n", + " global_key = str(uuid.uuid4())\n", + " data_rows.append({\n", + " \"row_data\":\n", + " f\"https://storage.googleapis.com/labelbox-datasets/Foundry_Notebooks/Image_Classification/image_{i}.jpeg\",\n", + " \"global_key\":\n", + " global_key\n", + " })\n", + " global_keys.append(global_key)\n", + "\n", + "\n", + "dataset = client.create_dataset(name=\"foundry-demo-dataset\")\n", + "task = dataset.create_data_rows(data_rows)\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")\n", + "print(f\"Failed data rows: {task.failed_data_rows}\")" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 2: Create/select an ontology that matches model\n", + "\n", + "Your project should have the correct ontology setup with all the tools and classifications supported for your model and data type.\n", + "\n", + "For example, when using Amazon Rekognition you would need to create a bounding box annotation for your ontology since it only supports object detection. Likewise when using YOLOv8 you would need to create a classification annotation for your ontology since it only supports image classification. \n", + "\n", + "In this tutorial, we will use YOLOv8 Classification to classify images in an dataset. " + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Create ontology with two image classification radio options that is included with YOLOv8 CLS: Computer and Phone\n", + "ontology_builder = lb.OntologyBuilder(\n", + " classifications=[\n", + " lb.Classification(\n", + " class_type=lb.Classification.Type.RADIO,\n", + " name=\"Image Classification Foundry Demo\",\n", + " options=([\n", + " lb.Option(value=\"Computer\"),\n", + " lb.Option(value=\"Phone\")\n", + " ]))\n", + " ],\n", + " tools=[]\n", + ")\n", + "\n", + "ontology = client.create_ontology(\"Image Classification Annotation Demo Foundry\",\n", + " ontology_builder.asdict(),\n", + " media_type=lb.MediaType.Image)" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 3: Create a labeling project\n", + "\n", + "Connect the ontology to the labeling project" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "project = client.create_project(name=\"Foundry Image Classification Demo\",\n", + " media_type=lb.MediaType.Image)\n", + "\n", + "project.setup_editor(ontology)" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 4: Create foundry application in UI\n", + "\n", + "Currently we do not support this workflow through the SDK\n", + "#### Workflow:\n", + "\n", + "1. Navigate to model and select ***Create*** > ***App***\n", + "\n", + "2. 
Select ***YOLOv8 Classification*** and name your foundry application\n", + "\n", + "3. Customize your perimeters and then select ***Save & Create***" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "#Select your foundry application inside the UI and copy the APP ID from the top right corner\n", + "YOLOV8_CLS_APP_ID = \"\"" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 5: Run foundry app on data rows\n" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "task = client.run_foundry_app(model_run_name=f\"YOLOv8-CLS-{str(uuid.uuid4())}\",\n", + " data_rows=lb.GlobalKeys(\n", + " global_keys # Provide a list of global keys \n", + " ), \n", + " app_id=YOLOV8_CLS_APP_ID)\n", + "\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\") \n", + "\n", + "#Obtain model run ID from task\n", + "MODEL_RUN_ID = task.metadata[\"modelRunId\"]" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 6: Map ontology through the UI\n", + "\n", + "Currently we do not support this workflow through the SDK\n", + "\n", + "#### Workflow\n", + "\n", + "1. Navigate to your dataset you created for your model run\n", + "2. Select **Select all** in the top right corner\n", + "3. Select **Manage selection** > **Send to Annotate**\n", + "4. Specify the project we created from the project dropdown menu\n", + "5. Mark **Include model predictions** then scroll down and select **Map**\n", + "6. Select the incoming ontology and matching ontology feature for both Computer and Phone\n", + "7. Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# Copy map ontology through the UI then paste JSON file here\n", + "PREDICTIONS_ONTOLOGY_MAPPING = {}" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Step 6: Send model generated annotations from catalog to annotate\n", + "\n", + "### Parameters\n", + "\n", + "When you send predicted data rows to annotate from catalog, you may choose to include or exclude certain parameters, at a minimum a predictions_ontology_mapping will need to be provided:\n", + "\n", + "* predictions_ontology_mapping: A mapping of feature schema ids to feature schema ids\n", + "* exclude_data_rows_in_project: Excludes data rows that are already in the project. \n", + "* override_existing_annotations_rule: The strategy defining how to handle conflicts in classifications between the data rows that already exist in the project and incoming predictions from the source model run or annotations from the source project. 
\n", + " * Defaults to ConflictResolutionStrategy.KeepExisting\n", + " * Options include:\n", + " * ConflictResolutionStrategy.KeepExisting\n", + " * ConflictResolutionStrategy.OverrideWithPredictions\n", + "* param batch_priority: The priority of the batch.\n" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "model_run = client.get_model_run(MODEL_RUN_ID)\n", + "\n", + "send_to_annotations_params = {\n", + " \"predictions_ontology_mapping\": PREDICTIONS_ONTOLOGY_MAPPING,\n", + " \"exclude_data_rows_in_project\": False,\n", + " \"override_existing_annotations_rule\": ConflictResolutionStrategy.OverrideWithPredictions,\n", + " \"batch_priority\": 5,\n", + "}\n", + "\n", + "\n", + "task = model_run.send_to_annotate_from_model(\n", + " destination_project_id=project.uid,\n", + " task_queue_id=None, #ID of workflow task, set ID to None if you want to convert pre-labels to ground truths or obtain task queue id through project.task_queues().\n", + " batch_name=\"Foundry Demo Batch\",\n", + " data_rows=lb.GlobalKeys(\n", + " global_keys # Provide a list of global keys from foundry app task\n", + " ),\n", + " params=send_to_annotations_params\n", + " )\n", + "\n", + "task.wait_till_done()\n", + "\n", + "print(f\"Errors: {task.errors}\")" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [ + "## Clean up" + ], + "cell_type": "markdown" + }, + { + "metadata": {}, + "source": [ + "# project.delete()\n", + "# dataset.delete()\n", + "# model_run.delete()" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "source": [], + "cell_type": "markdown" + } + ] +} \ No newline at end of file From 276b8206aa458f4c095ccefae2f1df8a60aecc7a Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Tue, 2 Jan 2024 15:59:25 -0600 Subject: [PATCH 6/9] added text_entities --- examples/foundry/image_classification.ipynb | 2 +- ...ummarization.ipynb => text_entities.ipynb} | 69 +++++++++---------- 2 files changed, 33 insertions(+), 38 deletions(-) rename examples/foundry/{text_summarization.ipynb => text_entities.ipynb} (81%) diff --git a/examples/foundry/image_classification.ipynb b/examples/foundry/image_classification.ipynb index 8339142d0..ea2065b5d 100644 --- a/examples/foundry/image_classification.ipynb +++ b/examples/foundry/image_classification.ipynb @@ -277,7 +277,7 @@ { "metadata": {}, "source": [ - "## Step 6: Send model generated annotations from catalog to annotate\n", + "## Step 7: Send model generated annotations from catalog to annotate\n", "\n", "### Parameters\n", "\n", diff --git a/examples/foundry/text_summarization.ipynb b/examples/foundry/text_entities.ipynb similarity index 81% rename from examples/foundry/text_summarization.ipynb rename to examples/foundry/text_entities.ipynb index 8339142d0..cfc4bc4b1 100644 --- a/examples/foundry/text_summarization.ipynb +++ b/examples/foundry/text_entities.ipynb @@ -16,12 +16,12 @@ "metadata": {}, "source": [ "\n", - "\n", "\n", "\n", "\n", - "\n", "" ], @@ -111,15 +111,14 @@ "data_rows = []\n", "global_keys =[]\n", "\n", - "for i in range(6):\n", - " global_key = str(uuid.uuid4())\n", - " data_rows.append({\n", - " \"row_data\":\n", - " f\"https://storage.googleapis.com/labelbox-datasets/Foundry_Notebooks/Image_Classification/image_{i}.jpeg\",\n", - " \"global_key\":\n", - " global_key\n", - " })\n", - " global_keys.append(global_key)\n", + "global_key = str(uuid.uuid4())\n", + 
"data_rows.append({\n", + " \"row_data\":\n", + " \"https://storage.googleapis.com/labelbox-datasets/Business_text/business_41.txt\",\n", + "\"global_key\":\n", + " global_key\n", + "})\n", + "global_keys.append(global_key)\n", "\n", "\n", "dataset = client.create_dataset(name=\"foundry-demo-dataset\")\n", @@ -140,9 +139,7 @@ "\n", "Your project should have the correct ontology setup with all the tools and classifications supported for your model and data type.\n", "\n", - "For example, when using Amazon Rekognition you would need to create a bounding box annotation for your ontology since it only supports object detection. Likewise when using YOLOv8 you would need to create a classification annotation for your ontology since it only supports image classification. \n", - "\n", - "In this tutorial, we will use YOLOv8 Classification to classify images in an dataset. " + "In this tutorial, we will use OpenAI GPT 3.5 to preform name-entity recognition from a sample story." ], "cell_type": "markdown" }, @@ -151,21 +148,18 @@ "source": [ "# Create ontology with two image classification radio options that is included with YOLOv8 CLS: Computer and Phone\n", "ontology_builder = lb.OntologyBuilder(\n", - " classifications=[\n", - " lb.Classification(\n", - " class_type=lb.Classification.Type.RADIO,\n", - " name=\"Image Classification Foundry Demo\",\n", - " options=([\n", - " lb.Option(value=\"Computer\"),\n", - " lb.Option(value=\"Phone\")\n", - " ]))\n", - " ],\n", - " tools=[]\n", + " classifications=[],\n", + " tools=[\n", + " lb.Tool(\n", + " tool=lb.Tool.Type.NER,\n", + " name=\"company names\"\n", + " )\n", + " ]\n", ")\n", "\n", - "ontology = client.create_ontology(\"Image Classification Annotation Demo Foundry\",\n", + "ontology = client.create_ontology(\"Text Entity Demo Foundry\",\n", " ontology_builder.asdict(),\n", - " media_type=lb.MediaType.Image)" + " media_type=lb.MediaType.Text)" ], "cell_type": "code", "outputs": [], @@ -183,8 +177,8 @@ { "metadata": {}, "source": [ - "project = client.create_project(name=\"Foundry Image Classification Demo\",\n", - " media_type=lb.MediaType.Image)\n", + "project = client.create_project(name=\"Foundry Text Entity Demo\",\n", + " media_type=lb.MediaType.Text)\n", "\n", "project.setup_editor(ontology)" ], @@ -202,9 +196,11 @@ "\n", "1. Navigate to model and select ***Create*** > ***App***\n", "\n", - "2. Select ***YOLOv8 Classification*** and name your foundry application\n", + "2. Select ***OpenAI GPT 3.5 Turbo*** and name your foundry application\n", + "\n", + "3. Click ***Select an ontology*** and select your ontology\n", "\n", - "3. Customize your perimeters and then select ***Save & Create***" + "4. Customize your perimeters and then select ***Save & Create***\n" ], "cell_type": "markdown" }, @@ -212,7 +208,7 @@ "metadata": {}, "source": [ "#Select your foundry application inside the UI and copy the APP ID from the top right corner\n", - "YOLOV8_CLS_APP_ID = \"\"" + "GPT3_APP_ID = \"\"" ], "cell_type": "code", "outputs": [], @@ -228,11 +224,11 @@ { "metadata": {}, "source": [ - "task = client.run_foundry_app(model_run_name=f\"YOLOv8-CLS-{str(uuid.uuid4())}\",\n", + "task = client.run_foundry_app(model_run_name=f\"OpenAI-GPT-3.5-{str(uuid.uuid4())}\",\n", " data_rows=lb.GlobalKeys(\n", " global_keys # Provide a list of global keys \n", " ), \n", - " app_id=YOLOV8_CLS_APP_ID)\n", + " app_id=GPT3_APP_ID)\n", "\n", "task.wait_till_done()\n", "\n", @@ -258,9 +254,8 @@ "2. Select **Select all** in the top right corner\n", "3. 
Select **Manage selection** > **Send to Annotate**\n", "4. Specify the project we created from the project dropdown menu\n", - "5. Mark **Include model predictions** then scroll down and select **Map**\n", - "6. Select the incoming ontology and matching ontology feature for both Computer and Phone\n", - "7. Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner" + "5. Mark **Include model predictions** then scroll down and select **Edit Map**\n", + "6. Press the **Copy ontology mapping as JSON** in the top right corner" ], "cell_type": "markdown" }, @@ -277,7 +272,7 @@ { "metadata": {}, "source": [ - "## Step 6: Send model generated annotations from catalog to annotate\n", + "## Step 7: Send model generated annotations from catalog to annotate\n", "\n", "### Parameters\n", "\n", From 248433fa15ac6018367c65a8f621c56fb9ac73b0 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Thu, 4 Jan 2024 16:20:31 -0600 Subject: [PATCH 7/9] deleted some other notebooks --- examples/foundry/image_classification.ipynb | 352 -------------------- examples/foundry/object_detection.ipynb | 56 ++-- examples/foundry/text_entities.ipynb | 347 ------------------- 3 files changed, 36 insertions(+), 719 deletions(-) delete mode 100644 examples/foundry/image_classification.ipynb delete mode 100644 examples/foundry/text_entities.ipynb diff --git a/examples/foundry/image_classification.ipynb b/examples/foundry/image_classification.ipynb deleted file mode 100644 index ea2065b5d..000000000 --- a/examples/foundry/image_classification.ipynb +++ /dev/null @@ -1,352 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 2, - "metadata": {}, - "cells": [ - { - "metadata": {}, - "source": [ - "\n", - " \n", - "" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# Foundry overview\n", - "\n", - "This notebook is used to go over the basic of foundry through the Python SDK" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "Foundry incorporates foundational models into your Labelbox workflow. You can use Foundry to:\n", - "\n", - "* Predict (infer) labels from your data\n", - "* Compare the performance of different foundational models with your data and ontologies.\n", - "* Prototype, diagnose, and refine a machine learning app to solve specific business needs.\n", - "\n", - "Foundry creates model runs that predict data row annotations based on your input." - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "!pip install -q labelbox" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "import labelbox as lb\n", - "from labelbox.schema.conflict_resolution_strategy import ConflictResolutionStrategy\n", - "import uuid" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "# API Key and Client\n", - "\n", - "Provide a valid API key below in order to properly connect to the Labelbox Client." 
- ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# Add your API key\n", - "API_KEY = \"\"\n", - "#To get your API key go to: Workspace settings -> API -> Create API Key\n", - "client = lb.Client(api_key=API_KEY)" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "# End-to-end example: Run foundry and send to annotate from catalog" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "## Step 1: Import data rows into catelog" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# send a sample images as data row for a dataset\n", - "data_rows = []\n", - "global_keys =[]\n", - "\n", - "for i in range(6):\n", - " global_key = str(uuid.uuid4())\n", - " data_rows.append({\n", - " \"row_data\":\n", - " f\"https://storage.googleapis.com/labelbox-datasets/Foundry_Notebooks/Image_Classification/image_{i}.jpeg\",\n", - " \"global_key\":\n", - " global_key\n", - " })\n", - " global_keys.append(global_key)\n", - "\n", - "\n", - "dataset = client.create_dataset(name=\"foundry-demo-dataset\")\n", - "task = dataset.create_data_rows(data_rows)\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")\n", - "print(f\"Failed data rows: {task.failed_data_rows}\")" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 2: Create/select an ontology that matches model\n", - "\n", - "Your project should have the correct ontology setup with all the tools and classifications supported for your model and data type.\n", - "\n", - "For example, when using Amazon Rekognition you would need to create a bounding box annotation for your ontology since it only supports object detection. Likewise when using YOLOv8 you would need to create a classification annotation for your ontology since it only supports image classification. \n", - "\n", - "In this tutorial, we will use YOLOv8 Classification to classify images in an dataset. " - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# Create ontology with two image classification radio options that is included with YOLOv8 CLS: Computer and Phone\n", - "ontology_builder = lb.OntologyBuilder(\n", - " classifications=[\n", - " lb.Classification(\n", - " class_type=lb.Classification.Type.RADIO,\n", - " name=\"Image Classification Foundry Demo\",\n", - " options=([\n", - " lb.Option(value=\"Computer\"),\n", - " lb.Option(value=\"Phone\")\n", - " ]))\n", - " ],\n", - " tools=[]\n", - ")\n", - "\n", - "ontology = client.create_ontology(\"Image Classification Annotation Demo Foundry\",\n", - " ontology_builder.asdict(),\n", - " media_type=lb.MediaType.Image)" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 3: Create a labeling project\n", - "\n", - "Connect the ontology to the labeling project" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "project = client.create_project(name=\"Foundry Image Classification Demo\",\n", - " media_type=lb.MediaType.Image)\n", - "\n", - "project.setup_editor(ontology)" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 4: Create foundry application in UI\n", - "\n", - "Currently we do not support this workflow through the SDK\n", - "#### Workflow:\n", - "\n", - "1. Navigate to model and select ***Create*** > ***App***\n", - "\n", - "2. 
Select ***YOLOv8 Classification*** and name your foundry application\n", - "\n", - "3. Customize your perimeters and then select ***Save & Create***" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "#Select your foundry application inside the UI and copy the APP ID from the top right corner\n", - "YOLOV8_CLS_APP_ID = \"\"" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 5: Run foundry app on data rows\n" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "task = client.run_foundry_app(model_run_name=f\"YOLOv8-CLS-{str(uuid.uuid4())}\",\n", - " data_rows=lb.GlobalKeys(\n", - " global_keys # Provide a list of global keys \n", - " ), \n", - " app_id=YOLOV8_CLS_APP_ID)\n", - "\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\") \n", - "\n", - "#Obtain model run ID from task\n", - "MODEL_RUN_ID = task.metadata[\"modelRunId\"]" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 6: Map ontology through the UI\n", - "\n", - "Currently we do not support this workflow through the SDK\n", - "\n", - "#### Workflow\n", - "\n", - "1. Navigate to your dataset you created for your model run\n", - "2. Select **Select all** in the top right corner\n", - "3. Select **Manage selection** > **Send to Annotate**\n", - "4. Specify the project we created from the project dropdown menu\n", - "5. Mark **Include model predictions** then scroll down and select **Map**\n", - "6. Select the incoming ontology and matching ontology feature for both Computer and Phone\n", - "7. Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# Copy map ontology through the UI then paste JSON file here\n", - "PREDICTIONS_ONTOLOGY_MAPPING = {}" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 7: Send model generated annotations from catalog to annotate\n", - "\n", - "### Parameters\n", - "\n", - "When you send predicted data rows to annotate from catalog, you may choose to include or exclude certain parameters, at a minimum a predictions_ontology_mapping will need to be provided:\n", - "\n", - "* predictions_ontology_mapping: A mapping of feature schema ids to feature schema ids\n", - "* exclude_data_rows_in_project: Excludes data rows that are already in the project. \n", - "* override_existing_annotations_rule: The strategy defining how to handle conflicts in classifications between the data rows that already exist in the project and incoming predictions from the source model run or annotations from the source project. 
\n", - " * Defaults to ConflictResolutionStrategy.KeepExisting\n", - " * Options include:\n", - " * ConflictResolutionStrategy.KeepExisting\n", - " * ConflictResolutionStrategy.OverrideWithPredictions\n", - "* param batch_priority: The priority of the batch.\n" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "model_run = client.get_model_run(MODEL_RUN_ID)\n", - "\n", - "send_to_annotations_params = {\n", - " \"predictions_ontology_mapping\": PREDICTIONS_ONTOLOGY_MAPPING,\n", - " \"exclude_data_rows_in_project\": False,\n", - " \"override_existing_annotations_rule\": ConflictResolutionStrategy.OverrideWithPredictions,\n", - " \"batch_priority\": 5,\n", - "}\n", - "\n", - "\n", - "task = model_run.send_to_annotate_from_model(\n", - " destination_project_id=project.uid,\n", - " task_queue_id=None, #ID of workflow task, set ID to None if you want to convert pre-labels to ground truths or obtain task queue id through project.task_queues().\n", - " batch_name=\"Foundry Demo Batch\",\n", - " data_rows=lb.GlobalKeys(\n", - " global_keys # Provide a list of global keys from foundry app task\n", - " ),\n", - " params=send_to_annotations_params\n", - " )\n", - "\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Clean up" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# project.delete()\n", - "# dataset.delete()\n", - "# model_run.delete()" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [], - "cell_type": "markdown" - } - ] -} \ No newline at end of file diff --git a/examples/foundry/object_detection.ipynb b/examples/foundry/object_detection.ipynb index 367b8f092..8c3da1ab7 100644 --- a/examples/foundry/object_detection.ipynb +++ b/examples/foundry/object_detection.ipynb @@ -82,8 +82,8 @@ "metadata": {}, "source": [ "# Add your API key\n", - "API_KEY = \"\"\n", - "#To get your API key go to: Workspace settings -> API -> Create API Key\n", + "API_KEY = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJjbG9vcmRpaGUwMDkyMDcza2Nvcm5jajdnIiwib3JnYW5pemF0aW9uSWQiOiJjbG9vcmRpZ3cwMDkxMDcza2M2cG9oeWFiIiwiYXBpS2V5SWQiOiJjbHE1OWd6M3MwMDRxMDcweDRwb3BmajV4Iiwic2VjcmV0IjoiOWE5ZWVmNDczNDI2ZDI2ZjUwOTU5ZDY4ZmZiNGJmMWMiLCJpYXQiOjE3MDI1NjIwMjYsImV4cCI6MjMzMzcxNDAyNn0.BsdKnIr8Np4eYxJ_6VILmuY-D6n2gUdvGKGvMHq9Eh4\"\n", + "# To get your API key go to: Workspace settings -> API -> Create API Key\n", "client = lb.Client(api_key=API_KEY)" ], "cell_type": "code", @@ -211,7 +211,7 @@ "metadata": {}, "source": [ "#Select your foundry application inside the UI and copy the APP ID from the top right corner\n", - "AMAZON_REKOGNITION_APP_ID = \"\"" + "AMAZON_REKOGNITION_APP_ID = \"990626af-4cbf-4a02-983c-e283b4e0758a\"" ], "cell_type": "code", "outputs": [], @@ -220,7 +220,9 @@ { "metadata": {}, "source": [ - "## Step 5: Run foundry app on data rows\n" + "## Step 5: Run foundry app on data rows\n", + "\n", + "This step is meant to generate annotations that can later be reused as pre-labels in a project. 
You must provide your app ID from the previous step for this method to run; please see the [Foundry Apps Guide](https://docs.labelbox.com/docs/foundry-apps#run-app-using-sdk) for more information.\n"
 ],
 "cell_type": "markdown"
 },
@@ -243,10 +245,18 @@
 "cell_type": "code",
 "outputs": [
 {
-            "name": "stdout",
-            "output_type": "stream",
-            "text": [
-                "Errors: None\n"
+            "ename": "ValidationError",
+            "evalue": "1 validation error for App\ndescription\n none is not an allowed value (type=type_error.none.not_allowed)",
+            "output_type": "error",
+            "traceback": [
+                "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+                "\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
+                "Cell \u001b[0;32mIn[47], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m task \u001b[38;5;241m=\u001b[39m \u001b[43mclient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_foundry_app\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_run_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mAmazon-\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[38;5;28;43mstr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43muuid\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43muuid4\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[43mdata_rows\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlb\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mGlobalKeys\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3\u001b[0m \u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43mglobal_key\u001b[49m\u001b[43m]\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Provide a list of global keys \u001b[39;49;00m\n\u001b[1;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mapp_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mAMAZON_REKOGNITION_APP_ID\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 7\u001b[0m task\u001b[38;5;241m.\u001b[39mwait_till_done()\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mErrors: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtask\u001b[38;5;241m.\u001b[39merrors\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m) \n",
+                "File \u001b[0;32m~/repos/labelbox-python/labelbox/client.py:1943\u001b[0m, in \u001b[0;36mClient.run_foundry_app\u001b[0;34m(self, model_run_name, data_rows, app_id)\u001b[0m\n\u001b[1;32m 1934\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 1935\u001b[0m \u001b[38;5;124;03mRun a foundry app\u001b[39;00m\n\u001b[1;32m 1936\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1940\u001b[0m \u001b[38;5;124;03m app_id (str): Foundry app to run predictions with\u001b[39;00m\n\u001b[1;32m 1941\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 1942\u001b[0m foundry_client \u001b[38;5;241m=\u001b[39m FoundryClient(\u001b[38;5;28mself\u001b[39m)\n\u001b[0;32m-> 1943\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfoundry_client\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_app\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_run_name\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdata_rows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mapp_id\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/repos/labelbox-python/labelbox/schema/foundry/foundry_client.py:79\u001b[0m, in \u001b[0;36mFoundryClient.run_app\u001b[0;34m(self, model_run_name, data_rows, app_id)\u001b[0m\n\u001b[1;32m 77\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mrun_app\u001b[39m(\u001b[38;5;28mself\u001b[39m, model_run_name: \u001b[38;5;28mstr\u001b[39m,\n\u001b[1;32m 78\u001b[0m data_rows: Union[DataRowIds, GlobalKeys], app_id: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Task:\n\u001b[0;32m---> 79\u001b[0m app \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_app\u001b[49m\u001b[43m(\u001b[49m\u001b[43mapp_id\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 81\u001b[0m data_rows_query \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mbuild_catalog_query(data_rows)\n\u001b[1;32m 83\u001b[0m params \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 84\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodelId\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mstr\u001b[39m(app\u001b[38;5;241m.\u001b[39mmodel_id),\n\u001b[1;32m 85\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m: model_run_name,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124montologyId\u001b[39m\u001b[38;5;124m\"\u001b[39m: app\u001b[38;5;241m.\u001b[39montology_id\n\u001b[1;32m 93\u001b[0m }\n", + "File \u001b[0;32m~/repos/labelbox-python/labelbox/schema/foundry/foundry_client.py:60\u001b[0m, in \u001b[0;36mFoundryClient._get_app\u001b[0;34m(self, id)\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 59\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m exceptions\u001b[38;5;241m.\u001b[39mLabelboxError(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mUnable to get app with id \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mid\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m, e)\n\u001b[0;32m---> 60\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mApp\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mresponse\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfindModelFoundryApp\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/repos/labelbox-python/env/lib/python3.10/site-packages/pydantic/main.py:341\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n", + "\u001b[0;31mValidationError\u001b[0m: 1 validation error for App\ndescription\n none is not an allowed value (type=type_error.none.not_allowed)" ] } ], @@ -257,7 +267,7 @@ "source": [ "## Step 6: Map ontology through the UI\n", "\n", - "Currently we do not support this workflow through the SDK\n", + "Something like: Mapping a model's ontology to a project's ontology is currently not supported through the SDK, however, to showcase how to send foundry predictions to a project, we are going to generate the mapping of the foundry app ontology to the project ontology through the UI.\n", "\n", "#### Workflow\n", "\n", @@ -265,9 +275,11 @@ "2. Select **Select all** in the top right corner\n", "3. 
Select **Manage selection** > **Send to Annotate**\n", "4. Specify the project we created from the project dropdown menu\n", + "5. Selecting a workflow step is not required since we are not sending annotations from the UI to a project using this notebook\n", "5. Mark **Include model predictions** then scroll down and select **Map**\n", "6. Select the incoming ontology and matching ontology feature for both Car and Person\n", - "7. Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner" + "7. Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner\n", + "8. Do not save this configuration, since we are not sending predictions to a project using this UI modal. We will be sending predictions in the following steps using the SDK" ], "cell_type": "markdown" }, @@ -284,20 +296,24 @@ { "metadata": {}, "source": [ - "## Step 6: Send model generated annotations from catalog to annotate\n", + "## Step 7: Send model generated annotations from catalog to annotate\n", "\n", "### Parameters\n", "\n", "When you send predicted data rows to annotate from catalog, you may choose to include or exclude certain parameters, at a minimum a predictions_ontology_mapping will need to be provided:\n", "\n", - "* predictions_ontology_mapping: A mapping of feature schema ids to feature schema ids\n", - "* exclude_data_rows_in_project: Excludes data rows that are already in the project. \n", - "* override_existing_annotations_rule: The strategy defining how to handle conflicts in classifications between the data rows that already exist in the project and incoming predictions from the source model run or annotations from the source project. \n", - " * Defaults to ConflictResolutionStrategy.KeepExisting\n", - " * Options include:\n", - " * ConflictResolutionStrategy.KeepExisting\n", - " * ConflictResolutionStrategy.OverrideWithPredictions\n", - "* param batch_priority: The priority of the batch.\n" + "* `predictions_ontology_mapping`\n", + " - A dictionary containing the mapping of the model's ontology feature schema ids to the project's ontology feature schema ids\n", + "* `exclude_data_rows_in_project`\n", + " - Excludes data rows that are already in the project. \n", + "* `override_existing_annotations_rule` \n", + " - The strategy defining how to handle conflicts in classifications between the data rows that already exist in the project and incoming predictions from the source model run or annotations from the source project. 
\n", + " * Defaults to ConflictResolutionStrategy.KeepExisting\n", + " * Options include:\n", + " * ConflictResolutionStrategy.KeepExisting\n", + " * ConflictResolutionStrategy.OverrideWithPredictions\n", + "* `param batch_priority`\n", + " - The priority of the batch.\n" ], "cell_type": "markdown" }, @@ -309,7 +325,7 @@ "send_to_annotations_params = {\n", " \"predictions_ontology_mapping\": PREDICTIONS_ONTOLOGY_MAPPING,\n", " \"exclude_data_rows_in_project\": False,\n", - " \"override_existing_annotations_rule\": ConflictResolutionStrategy.OverrideWithPredictions,\n", + " \"override_existing_annotations_rule\": ConflictResolutionStrategy.OverrideWithAnnotations,\n", " \"batch_priority\": 5,\n", "}\n", "\n", diff --git a/examples/foundry/text_entities.ipynb b/examples/foundry/text_entities.ipynb deleted file mode 100644 index cfc4bc4b1..000000000 --- a/examples/foundry/text_entities.ipynb +++ /dev/null @@ -1,347 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 2, - "metadata": {}, - "cells": [ - { - "metadata": {}, - "source": [ - "\n", - " \n", - "" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# Foundry overview\n", - "\n", - "This notebook is used to go over the basic of foundry through the Python SDK" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "Foundry incorporates foundational models into your Labelbox workflow. You can use Foundry to:\n", - "\n", - "* Predict (infer) labels from your data\n", - "* Compare the performance of different foundational models with your data and ontologies.\n", - "* Prototype, diagnose, and refine a machine learning app to solve specific business needs.\n", - "\n", - "Foundry creates model runs that predict data row annotations based on your input." - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "!pip install -q labelbox" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "import labelbox as lb\n", - "from labelbox.schema.conflict_resolution_strategy import ConflictResolutionStrategy\n", - "import uuid" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "# API Key and Client\n", - "\n", - "Provide a valid API key below in order to properly connect to the Labelbox Client." 
- ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# Add your API key\n", - "API_KEY = \"\"\n", - "#To get your API key go to: Workspace settings -> API -> Create API Key\n", - "client = lb.Client(api_key=API_KEY)" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "# End-to-end example: Run foundry and send to annotate from catalog" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "## Step 1: Import data rows into catelog" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# send a sample images as data row for a dataset\n", - "data_rows = []\n", - "global_keys =[]\n", - "\n", - "global_key = str(uuid.uuid4())\n", - "data_rows.append({\n", - " \"row_data\":\n", - " \"https://storage.googleapis.com/labelbox-datasets/Business_text/business_41.txt\",\n", - "\"global_key\":\n", - " global_key\n", - "})\n", - "global_keys.append(global_key)\n", - "\n", - "\n", - "dataset = client.create_dataset(name=\"foundry-demo-dataset\")\n", - "task = dataset.create_data_rows(data_rows)\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")\n", - "print(f\"Failed data rows: {task.failed_data_rows}\")" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 2: Create/select an ontology that matches model\n", - "\n", - "Your project should have the correct ontology setup with all the tools and classifications supported for your model and data type.\n", - "\n", - "In this tutorial, we will use OpenAI GPT 3.5 to preform name-entity recognition from a sample story." - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# Create ontology with two image classification radio options that is included with YOLOv8 CLS: Computer and Phone\n", - "ontology_builder = lb.OntologyBuilder(\n", - " classifications=[],\n", - " tools=[\n", - " lb.Tool(\n", - " tool=lb.Tool.Type.NER,\n", - " name=\"company names\"\n", - " )\n", - " ]\n", - ")\n", - "\n", - "ontology = client.create_ontology(\"Text Entity Demo Foundry\",\n", - " ontology_builder.asdict(),\n", - " media_type=lb.MediaType.Text)" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 3: Create a labeling project\n", - "\n", - "Connect the ontology to the labeling project" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "project = client.create_project(name=\"Foundry Text Entity Demo\",\n", - " media_type=lb.MediaType.Text)\n", - "\n", - "project.setup_editor(ontology)" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 4: Create foundry application in UI\n", - "\n", - "Currently we do not support this workflow through the SDK\n", - "#### Workflow:\n", - "\n", - "1. Navigate to model and select ***Create*** > ***App***\n", - "\n", - "2. Select ***OpenAI GPT 3.5 Turbo*** and name your foundry application\n", - "\n", - "3. Click ***Select an ontology*** and select your ontology\n", - "\n", - "4. 
Customize your perimeters and then select ***Save & Create***\n" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "#Select your foundry application inside the UI and copy the APP ID from the top right corner\n", - "GPT3_APP_ID = \"\"" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 5: Run foundry app on data rows\n" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "task = client.run_foundry_app(model_run_name=f\"OpenAI-GPT-3.5-{str(uuid.uuid4())}\",\n", - " data_rows=lb.GlobalKeys(\n", - " global_keys # Provide a list of global keys \n", - " ), \n", - " app_id=GPT3_APP_ID)\n", - "\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\") \n", - "\n", - "#Obtain model run ID from task\n", - "MODEL_RUN_ID = task.metadata[\"modelRunId\"]" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 6: Map ontology through the UI\n", - "\n", - "Currently we do not support this workflow through the SDK\n", - "\n", - "#### Workflow\n", - "\n", - "1. Navigate to your dataset you created for your model run\n", - "2. Select **Select all** in the top right corner\n", - "3. Select **Manage selection** > **Send to Annotate**\n", - "4. Specify the project we created from the project dropdown menu\n", - "5. Mark **Include model predictions** then scroll down and select **Edit Map**\n", - "6. Press the **Copy ontology mapping as JSON** in the top right corner" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# Copy map ontology through the UI then paste JSON file here\n", - "PREDICTIONS_ONTOLOGY_MAPPING = {}" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Step 7: Send model generated annotations from catalog to annotate\n", - "\n", - "### Parameters\n", - "\n", - "When you send predicted data rows to annotate from catalog, you may choose to include or exclude certain parameters, at a minimum a predictions_ontology_mapping will need to be provided:\n", - "\n", - "* predictions_ontology_mapping: A mapping of feature schema ids to feature schema ids\n", - "* exclude_data_rows_in_project: Excludes data rows that are already in the project. \n", - "* override_existing_annotations_rule: The strategy defining how to handle conflicts in classifications between the data rows that already exist in the project and incoming predictions from the source model run or annotations from the source project. 
\n", - " * Defaults to ConflictResolutionStrategy.KeepExisting\n", - " * Options include:\n", - " * ConflictResolutionStrategy.KeepExisting\n", - " * ConflictResolutionStrategy.OverrideWithPredictions\n", - "* param batch_priority: The priority of the batch.\n" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "model_run = client.get_model_run(MODEL_RUN_ID)\n", - "\n", - "send_to_annotations_params = {\n", - " \"predictions_ontology_mapping\": PREDICTIONS_ONTOLOGY_MAPPING,\n", - " \"exclude_data_rows_in_project\": False,\n", - " \"override_existing_annotations_rule\": ConflictResolutionStrategy.OverrideWithPredictions,\n", - " \"batch_priority\": 5,\n", - "}\n", - "\n", - "\n", - "task = model_run.send_to_annotate_from_model(\n", - " destination_project_id=project.uid,\n", - " task_queue_id=None, #ID of workflow task, set ID to None if you want to convert pre-labels to ground truths or obtain task queue id through project.task_queues().\n", - " batch_name=\"Foundry Demo Batch\",\n", - " data_rows=lb.GlobalKeys(\n", - " global_keys # Provide a list of global keys from foundry app task\n", - " ),\n", - " params=send_to_annotations_params\n", - " )\n", - "\n", - "task.wait_till_done()\n", - "\n", - "print(f\"Errors: {task.errors}\")" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [ - "## Clean up" - ], - "cell_type": "markdown" - }, - { - "metadata": {}, - "source": [ - "# project.delete()\n", - "# dataset.delete()\n", - "# model_run.delete()" - ], - "cell_type": "code", - "outputs": [], - "execution_count": null - }, - { - "metadata": {}, - "source": [], - "cell_type": "markdown" - } - ] -} \ No newline at end of file From c89a0dc8fb4c55f1051a3d84ed7c06948f12f251 Mon Sep 17 00:00:00 2001 From: Gabefire <33893811+Gabefire@users.noreply.github.com> Date: Fri, 5 Jan 2024 11:24:21 -0600 Subject: [PATCH 8/9] made a few changes to wording --- examples/foundry/object_detection.ipynb | 35 ++++++++++--------------- 1 file changed, 14 insertions(+), 21 deletions(-) diff --git a/examples/foundry/object_detection.ipynb b/examples/foundry/object_detection.ipynb index 8c3da1ab7..c205feca2 100644 --- a/examples/foundry/object_detection.ipynb +++ b/examples/foundry/object_detection.ipynb @@ -82,7 +82,7 @@ "metadata": {}, "source": [ "# Add your API key\n", - "API_KEY = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJjbG9vcmRpaGUwMDkyMDcza2Nvcm5jajdnIiwib3JnYW5pemF0aW9uSWQiOiJjbG9vcmRpZ3cwMDkxMDcza2M2cG9oeWFiIiwiYXBpS2V5SWQiOiJjbHE1OWd6M3MwMDRxMDcweDRwb3BmajV4Iiwic2VjcmV0IjoiOWE5ZWVmNDczNDI2ZDI2ZjUwOTU5ZDY4ZmZiNGJmMWMiLCJpYXQiOjE3MDI1NjIwMjYsImV4cCI6MjMzMzcxNDAyNn0.BsdKnIr8Np4eYxJ_6VILmuY-D6n2gUdvGKGvMHq9Eh4\"\n", + "API_KEY = \"\"\n", "# To get your API key go to: Workspace settings -> API -> Create API Key\n", "client = lb.Client(api_key=API_KEY)" ], @@ -211,7 +211,7 @@ "metadata": {}, "source": [ "#Select your foundry application inside the UI and copy the APP ID from the top right corner\n", - "AMAZON_REKOGNITION_APP_ID = \"990626af-4cbf-4a02-983c-e283b4e0758a\"" + "AMAZON_REKOGNITION_APP_ID = \"\"" ], "cell_type": "code", "outputs": [], @@ -245,18 +245,10 @@ "cell_type": "code", "outputs": [ { - "ename": "ValidationError", - "evalue": "1 validation error for App\ndescription\n none is not an allowed value (type=type_error.none.not_allowed)", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - 
"\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[47], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m task \u001b[38;5;241m=\u001b[39m \u001b[43mclient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_foundry_app\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_run_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mAmazon-\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[38;5;28;43mstr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43muuid\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43muuid4\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[43mdata_rows\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlb\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mGlobalKeys\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3\u001b[0m \u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43mglobal_key\u001b[49m\u001b[43m]\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Provide a list of global keys \u001b[39;49;00m\n\u001b[1;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mapp_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mAMAZON_REKOGNITION_APP_ID\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 7\u001b[0m task\u001b[38;5;241m.\u001b[39mwait_till_done()\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mErrors: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtask\u001b[38;5;241m.\u001b[39merrors\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m) \n", - "File \u001b[0;32m~/repos/labelbox-python/labelbox/client.py:1943\u001b[0m, in \u001b[0;36mClient.run_foundry_app\u001b[0;34m(self, model_run_name, data_rows, app_id)\u001b[0m\n\u001b[1;32m 1934\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 1935\u001b[0m \u001b[38;5;124;03mRun a foundry app\u001b[39;00m\n\u001b[1;32m 1936\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1940\u001b[0m \u001b[38;5;124;03m app_id (str): Foundry app to run predictions with\u001b[39;00m\n\u001b[1;32m 1941\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 1942\u001b[0m foundry_client \u001b[38;5;241m=\u001b[39m FoundryClient(\u001b[38;5;28mself\u001b[39m)\n\u001b[0;32m-> 1943\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfoundry_client\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_app\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_run_name\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdata_rows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mapp_id\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/repos/labelbox-python/labelbox/schema/foundry/foundry_client.py:79\u001b[0m, in \u001b[0;36mFoundryClient.run_app\u001b[0;34m(self, model_run_name, data_rows, app_id)\u001b[0m\n\u001b[1;32m 77\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mrun_app\u001b[39m(\u001b[38;5;28mself\u001b[39m, model_run_name: \u001b[38;5;28mstr\u001b[39m,\n\u001b[1;32m 78\u001b[0m data_rows: Union[DataRowIds, GlobalKeys], app_id: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Task:\n\u001b[0;32m---> 79\u001b[0m app \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_app\u001b[49m\u001b[43m(\u001b[49m\u001b[43mapp_id\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 81\u001b[0m data_rows_query \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mbuild_catalog_query(data_rows)\n\u001b[1;32m 83\u001b[0m params \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 84\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodelId\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mstr\u001b[39m(app\u001b[38;5;241m.\u001b[39mmodel_id),\n\u001b[1;32m 85\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m: model_run_name,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124montologyId\u001b[39m\u001b[38;5;124m\"\u001b[39m: app\u001b[38;5;241m.\u001b[39montology_id\n\u001b[1;32m 93\u001b[0m }\n", - "File \u001b[0;32m~/repos/labelbox-python/labelbox/schema/foundry/foundry_client.py:60\u001b[0m, in \u001b[0;36mFoundryClient._get_app\u001b[0;34m(self, id)\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 59\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m exceptions\u001b[38;5;241m.\u001b[39mLabelboxError(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mUnable to get app with id \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mid\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m, e)\n\u001b[0;32m---> 60\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mApp\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mresponse\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfindModelFoundryApp\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/repos/labelbox-python/env/lib/python3.10/site-packages/pydantic/main.py:341\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n", - "\u001b[0;31mValidationError\u001b[0m: 1 validation error for App\ndescription\n none is not an allowed value (type=type_error.none.not_allowed)" + "name": "stdout", + "output_type": "stream", + "text": [ + "Errors: None\n" ] } ], @@ -275,11 +267,11 @@ "2. Select **Select all** in the top right corner\n", "3. Select **Manage selection** > **Send to Annotate**\n", "4. Specify the project we created from the project dropdown menu\n", - "5. Selecting a workflow step is not required since we are not sending annotations from the UI to a project using this notebook\n", - "5. Mark **Include model predictions** then scroll down and select **Map**\n", - "6. Select the incoming ontology and matching ontology feature for both Car and Person\n", - "7. Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner\n", - "8. Do not save this configuration, since we are not sending predictions to a project using this UI modal. We will be sending predictions in the following steps using the SDK" + "5. Selecting a workflow step is not required since we are not sending annotations from the UI to a project using this notebook \n", + "6. Mark **Include model predictions** then scroll down and select **Map**\n", + "7. Select the incoming ontology and matching ontology feature for both Car and Person\n", + "8. 
Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner\n",
+        "9. Do not save this configuration, since we are not sending predictions to a project using this UI modal. We will be sending predictions in the following steps using the SDK"
 ],
 "cell_type": "markdown"
 },
 {
 "metadata": {},
 "source": [
 "# Copy map ontology through the UI then paste JSON file here\n",
-        "PREDICTIONS_ONTOLOGY_MAPPING = \"\""
+        "PREDICTIONS_ONTOLOGY_MAPPING = {}"
 ],
 "cell_type": "code",
 "outputs": [],
@@ -312,6 +304,7 @@
 " * Options include:\n",
 " * ConflictResolutionStrategy.KeepExisting\n",
 " * ConflictResolutionStrategy.OverrideWithPredictions\n",
+    " * ConflictResolutionStrategy.OverrideWithAnnotations\n",
 "* `param batch_priority`\n",
 " - The priority of the batch.\n"
 ],
 "cell_type": "markdown"
 },
@@ -325,7 +318,7 @@
 "send_to_annotations_params = {\n",
 "    \"predictions_ontology_mapping\": PREDICTIONS_ONTOLOGY_MAPPING,\n",
 "    \"exclude_data_rows_in_project\": False,\n",
-    "    \"override_existing_annotations_rule\": ConflictResolutionStrategy.OverrideWithAnnotations,\n",
+    "    \"override_existing_annotations_rule\": ConflictResolutionStrategy.OverrideWithPredictions,\n",
 "    \"batch_priority\": 5,\n",
 "}\n",
 "\n",
From 2fd2ff4b5af621778a290fef7830a96864104e93 Mon Sep 17 00:00:00 2001
From: Gabefire <33893811+Gabefire@users.noreply.github.com>
Date: Fri, 12 Jan 2024 16:11:18 -0600
Subject: [PATCH 9/9] fixed small typo

---
 examples/foundry/object_detection.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/foundry/object_detection.ipynb b/examples/foundry/object_detection.ipynb
index c205feca2..d699bb9d0 100644
--- a/examples/foundry/object_detection.ipynb
+++ b/examples/foundry/object_detection.ipynb
@@ -259,7 +259,7 @@
 "source": [
 "## Step 6: Map ontology through the UI\n",
 "\n",
-        "Something like: Mapping a model's ontology to a project's ontology is currently not supported through the SDK, however, to showcase how to send foundry predictions to a project, we are going to generate the mapping of the foundry app ontology to the project ontology through the UI.\n",
+        "Mapping a model's ontology to a project's ontology is currently not supported through the SDK; however, to showcase how to send foundry predictions to a project, we are going to generate the mapping of the foundry app ontology to the project ontology through the UI.\n",
 "\n",
 "#### Workflow\n",
 "\n",