-
Notifications
You must be signed in to change notification settings - Fork 4
Feat/custom pipeline nicegui 3 3 1 #268
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
5fef607
35b5f56
c55a75d
8eed64b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -50,6 +50,10 @@ class SubmitForm: | |
| deadline: str = (datetime.now().astimezone() + timedelta(hours=24)).strftime("%Y-%m-%d %H:%M") | ||
| validate_only: bool = False | ||
| onboard_to_aignostics_portal: bool = False | ||
| gpu_type: str = "A100" | ||
| gpu_provisioning_mode: str = "ON_DEMAND" | ||
| max_gpus_per_slide: int = 1 | ||
| cpu_provisioning_mode: str = "ON_DEMAND" | ||
|
|
||
|
|
||
| submit_form = SubmitForm() | ||
|
|
@@ -702,11 +706,25 @@ def _submit() -> None: | |
| """Submit the application run.""" | ||
| ui.notify("Submitting application run ...", type="info") | ||
| try: | ||
| # Build custom metadata with pipeline configuration | ||
| custom_metadata = { | ||
| "pipeline": { | ||
| "gpu": { | ||
| "gpu_type": submit_form.gpu_type, | ||
| "provisioning_mode": submit_form.gpu_provisioning_mode, | ||
| "max_gpus_per_slide": submit_form.max_gpus_per_slide, | ||
| }, | ||
| "cpu": { | ||
| "provisioning_mode": submit_form.cpu_provisioning_mode, | ||
| }, | ||
| }, | ||
| } | ||
|
|
||
| run = service.application_run_submit_from_metadata( | ||
| application_id=str(submit_form.application_id), | ||
| metadata=submit_form.metadata or [], | ||
| application_version=str(submit_form.application_version), | ||
| custom_metadata=None, # TODO(Helmut): Allow user to edit custom metadata | ||
| custom_metadata=custom_metadata, | ||
| note=submit_form.note, | ||
| tags=set(submit_form.tags) if submit_form.tags else None, | ||
| due_date=datetime.strptime(submit_form.due_date, "%Y-%m-%d %H:%M") | ||
|
|
@@ -816,6 +834,80 @@ def _update_upload_progress() -> None: | |
| break | ||
| _upload_ui.refresh(submit_form.metadata) | ||
|
|
||
| with ui.step("Pipeline"): | ||
| user_info: UserInfo | None = app.storage.tab.get("user_info", None) | ||
| can_configure_pipeline = ( | ||
| user_info | ||
| and user_info.organization | ||
| and user_info.organization.name | ||
| and user_info.organization.name.lower() in {"aignostics", "pre-alpha-org", "lmu", "charite"} | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Security & Scalability Consideration: Hardcoding organization names in the frontend for feature gating has maintainability implications: user_info.organization.name.lower() in {"aignostics", "pre-alpha-org", "lmu", "charite"}. Suggestions:
Current Risk: Low (alpha feature with known orgs) Why it matters: As this feature rolls out to more organizations, changing code for each org request is not sustainable for a medical device SDK. |
||
| ) | ||
|
|
||
| if can_configure_pipeline: | ||
| with ui.column(align_items="start").classes("w-full"): | ||
| ui.label("GPU Configuration").classes("text-h6 mb-0 pb-0") | ||
| ui.label( | ||
| "Configure GPU resources for processing your whole slide images. " | ||
| "These settings control the type and provisioning mode of GPUs used during AI analysis." | ||
| ).classes("text-sm mt-0 pt-0 mb-4") | ||
|
|
||
| with ui.row().classes("w-full gap-4"): | ||
| ui.select( | ||
| label="GPU Type", | ||
| options={"L4": "L4", "A100": "A100"}, | ||
| value=submit_form.gpu_type, | ||
| ).bind_value(submit_form, "gpu_type").mark("SELECT_GPU_TYPE").classes("w-1/3") | ||
|
|
||
| ui.number( | ||
| label="Max GPUs per Slide", | ||
| value=submit_form.max_gpus_per_slide, | ||
| min=1, | ||
| max=8, | ||
| step=1, | ||
| ).bind_value(submit_form, "max_gpus_per_slide").mark("NUMBER_MAX_GPUS_PER_SLIDE").classes( | ||
| "w-1/3" | ||
| ) | ||
|
|
||
| ui.select( | ||
| label="GPU Provisioning Mode", | ||
| options={ | ||
| "SPOT": "Spot nodes (lower cost, better availability, might be preempted and retried)", | ||
| "ON_DEMAND": ( | ||
| "On demand nodes (higher cost, limited availability, processing might be delayed)" | ||
| ), | ||
| }, | ||
| value=submit_form.gpu_provisioning_mode, | ||
| ).bind_value(submit_form, "gpu_provisioning_mode").mark("SELECT_GPU_PROVISIONING_MODE").classes( | ||
| "w-1/3" | ||
| ) | ||
|
|
||
| ui.separator().classes("my-4") | ||
|
|
||
| ui.label("CPU Configuration").classes("text-h6 mb-0 pb-0") | ||
| ui.label("Configure CPU resources for algorithms that do not require GPU acceleration.").classes( | ||
| "text-sm mt-0 pt-0 mb-4" | ||
| ) | ||
|
|
||
| with ui.row().classes("w-full gap-4"): | ||
| ui.select( | ||
| label="CPU Provisioning Mode", | ||
| options={ | ||
| "SPOT": "Spot nodes (lower cost, better availability, might be preempted and retried)", | ||
| "ON_DEMAND": "On demand nodes (higher cost, limited availability, might be delayed)", | ||
| }, | ||
| value=submit_form.cpu_provisioning_mode, | ||
| ).bind_value(submit_form, "cpu_provisioning_mode").mark("SELECT_CPU_PROVISIONING_MODE").classes( | ||
| "w-1/2" | ||
| ) | ||
| else: | ||
| ui.label( | ||
| "Pipeline configuration is not available for your organization. Default settings will be used." | ||
| ).classes("text-body1") | ||
|
|
||
| with ui.stepper_navigation(): | ||
| ui.button("Next", on_click=stepper.next).mark("BUTTON_PIPELINE_NEXT") | ||
| ui.button("Back", on_click=stepper.previous).props("flat") | ||
|
|
||
| with ui.step("Submit"): | ||
| _upload_ui([]) | ||
| ui.timer(0.1, callback=_update_upload_progress) | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -7,16 +7,76 @@ | |
| import os | ||
| import sys | ||
| from datetime import UTC, datetime | ||
| from enum import StrEnum | ||
| from typing import Any, Literal | ||
|
|
||
| from loguru import logger | ||
| from pydantic import BaseModel, Field, ValidationError | ||
| from pydantic import BaseModel, Field, PositiveInt, ValidationError | ||
|
|
||
| from aignostics.utils import user_agent | ||
|
|
||
| SDK_METADATA_SCHEMA_VERSION = "0.0.4" | ||
| ITEM_SDK_METADATA_SCHEMA_VERSION = "0.0.3" | ||
|
|
||
| # Pipeline orchestration defaults | ||
| DEFAULT_GPU_TYPE = "A100" | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Excellent Design Choice ✅ Defining pipeline defaults as module-level constants:
This follows best practices for configuration management in enterprise software. |
||
| DEFAULT_MAX_GPUS_PER_SLIDE = 1 | ||
| DEFAULT_GPU_PROVISIONING_MODE = "ON_DEMAND" | ||
| DEFAULT_CPU_PROVISIONING_MODE = "ON_DEMAND" | ||
|
|
||
|
|
||
| class GPUType(StrEnum): | ||
| """Type of GPU to use for processing.""" | ||
|
|
||
| L4 = "L4" | ||
| A100 = "A100" | ||
|
|
||
|
|
||
| class ProvisioningMode(StrEnum): | ||
| """Provisioning mode for resources.""" | ||
|
|
||
| SPOT = "SPOT" | ||
| ON_DEMAND = "ON_DEMAND" | ||
|
|
||
|
|
||
| class CPUConfig(BaseModel): | ||
| """Configuration for CPU resources.""" | ||
|
|
||
| provisioning_mode: ProvisioningMode = Field( | ||
| default_factory=lambda: ProvisioningMode(DEFAULT_CPU_PROVISIONING_MODE), | ||
| description="The provisioning mode for CPU resources (SPOT or ON_DEMAND)", | ||
| ) | ||
|
|
||
|
|
||
| class GPUConfig(BaseModel): | ||
| """Configuration for GPU resources.""" | ||
|
|
||
| gpu_type: GPUType = Field( | ||
| default_factory=lambda: GPUType(DEFAULT_GPU_TYPE), | ||
| description="The type of GPU to use (L4 or A100)", | ||
| ) | ||
| provisioning_mode: ProvisioningMode = Field( | ||
| default_factory=lambda: ProvisioningMode(DEFAULT_GPU_PROVISIONING_MODE), | ||
| description="The provisioning mode for GPU resources (SPOT or ON_DEMAND)", | ||
| ) | ||
| max_gpus_per_slide: PositiveInt = Field( | ||
| default=DEFAULT_MAX_GPUS_PER_SLIDE, | ||
| description="The maximum number of GPUs to allocate per slide", | ||
| ) | ||
|
|
||
|
|
||
| class PipelineConfig(BaseModel): | ||
| """Pipeline configuration for dynamic orchestration.""" | ||
|
|
||
| gpu: GPUConfig = Field( | ||
| default_factory=GPUConfig, | ||
| description="GPU resource configuration", | ||
| ) | ||
| cpu: CPUConfig = Field( | ||
| default_factory=CPUConfig, | ||
| description="CPU resource configuration", | ||
| ) | ||
|
|
||
|
|
||
| class SubmissionMetadata(BaseModel): | ||
| """Metadata about how the SDK was invoked.""" | ||
|
|
@@ -121,6 +181,7 @@ class RunSdkMetadata(BaseModel): | |
| note: str | None = Field(None, description="Optional user note for the run") | ||
| workflow: WorkflowMetadata | None = Field(None, description="Workflow control flags") | ||
| scheduling: SchedulingMetadata | None = Field(None, description="Scheduling information") | ||
| pipeline: PipelineConfig | None = Field(None, description="Pipeline orchestration configuration") | ||
|
|
||
| model_config = {"extra": "forbid"} # Reject unknown fields | ||
|
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Enhancement Suggestion:
Consider using enum types directly for validation instead of strings:
Benefits:
Current approach is acceptable - this is an enhancement, not a bug.