Skip to content

Commit

Permalink
Backend-defined package settings (#2104)
Browse files Browse the repository at this point in the history
* settings wip shizzle

* gpu dropdown

* Option to enable GUI hardware acceleration (#2055)

Add a settings option in the 'Advanced Tab' to allow hardware acceleration.

* Fixed release CI (#2059)

* Bump to 0.19.1 (#2061)

* Added sorting back to Image File Iterator (#2067)

* fix duplicating nodes and exporting firing twice (#2068)

fixes #2065

* changes to theme selection (#2060)

Instead of a toggle for "always dark", the user can now choose between:

Dark Mode / Light Mode / System

* Mac Stuff (#2069)

* add delete and select all to the nodes menu

* use proper delete and multiselect keycode on mac

* Features (#2053)

* WIP

* new api

* /features endpoint

* New backend API

* Switch to packages in the UI

* Basic UI support for features

* typo

* UI WIP

* Unified markdown handling

* slight fixes

* Properly throw server errors

* Better error messages

* Basic feature checking

* Added spinner while refreshing

* UI changes for backend settings

* some wip stuff

* Fix setting state

* fix some typing and stuff

* use immer

* some changes

* wip

* refactor execution settings

* Update backend/src/api.py

Co-authored-by: Michael Schmidt <mitchi5000.ms@googlemail.com>

* wip

* refactor stuff

* fix cache stuff

* ncnn settings

* cleanup

* lint

* remove unnecessary typevar

* remove TODO

* Some PR suggestions

* ...

* linting

* Require icon and color

* More settings (#2137)

* Basic API refactor

* Make tensor cache path nullable

* Minor changes

* Setting components and other stuff

* Only show clear cache when cache is enabled

* Better type

* Get rid of cache object

* linter is always right

---------

Co-authored-by: Toni Förster <toni.foerster@gmail.com>
Co-authored-by: Michael Schmidt <mitchi5000.ms@googlemail.com>
Co-authored-by: theflyingzamboni <55669985+theflyingzamboni@users.noreply.github.com>
  • Loading branch information
4 people committed Aug 28, 2023
1 parent cc1023f commit 1ec090c
Show file tree
Hide file tree
Showing 36 changed files with 935 additions and 814 deletions.
101 changes: 97 additions & 4 deletions backend/src/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
Tuple,
TypedDict,
TypeVar,
Union,
)

from sanic.log import logger
Expand All @@ -30,6 +31,7 @@
from nodes.base_input import BaseInput
from nodes.base_output import BaseOutput
from nodes.group import Group, GroupId, NestedGroup, NestedIdGroup
from nodes.utils.exec_options import SettingsJson, get_execution_options

KB = 1024**1
MB = 1024**2
Expand Down Expand Up @@ -300,15 +302,99 @@ def disabled(details: str | None = None) -> "FeatureState":
return FeatureState(is_enabled=False, details=details)


@dataclass
class ToggleSetting:
    """A boolean on/off setting declared by a backend package."""

    label: str  # human-readable name shown for the setting
    key: str  # key the value is stored/looked up under
    description: str  # longer explanatory text
    default: bool = False  # value used when the user has not set one
    disabled: bool = False  # when True the setting cannot be changed
    # Variant discriminator; distinguishes this from the other Setting kinds.
    type: str = "toggle"


class DropdownOption(TypedDict):
    """One selectable entry of a DropdownSetting."""

    label: str  # text displayed for this option
    value: str  # value stored when this option is selected


@dataclass
class DropdownSetting:
    """A setting whose value is chosen from a fixed list of options."""

    label: str  # human-readable name shown for the setting
    key: str  # key the value is stored/looked up under
    description: str  # longer explanatory text
    options: List[DropdownOption]  # the selectable entries
    default: str  # the `value` of the option selected by default
    disabled: bool = False  # when True the setting cannot be changed
    # Variant discriminator; distinguishes this from the other Setting kinds.
    type: str = "dropdown"


@dataclass
class NumberSetting:
    """A numeric setting constrained to an inclusive [min, max] range."""

    label: str  # human-readable name shown for the setting
    key: str  # key the value is stored/looked up under
    description: str  # longer explanatory text
    min: float  # lower bound of allowed values
    max: float  # upper bound of allowed values
    default: float = 0  # value used when the user has not set one
    disabled: bool = False  # when True the setting cannot be changed
    # Variant discriminator; distinguishes this from the other Setting kinds.
    type: str = "number"


@dataclass
class CacheSetting:
    """A setting controlling an on-disk cache location."""

    label: str  # human-readable name shown for the setting
    key: str  # key the value is stored/looked up under
    description: str  # longer explanatory text
    # Directory associated with the cache — presumably where cached data
    # lives so the UI can manage/clear it; TODO confirm against the frontend.
    directory: str
    default: str = ""  # default cache location ("" = none)
    disabled: bool = False  # when True the setting cannot be changed
    # Variant discriminator; distinguishes this from the other Setting kinds.
    type: str = "cache"


# Union of every setting variant a package may declare; each variant carries
# a distinct `type` string so consumers can tell them apart.
Setting = Union[ToggleSetting, DropdownSetting, NumberSetting, CacheSetting]


class SettingsParser:
    """Typed read access to a package's raw settings JSON.

    Each getter returns the stored value when it has the expected type and
    raises ``ValueError`` otherwise, so a badly-typed setting fails loudly
    instead of propagating a wrong value.
    """

    def __init__(self, raw: "SettingsJson") -> None:
        self.__settings = raw

    def get_bool(self, key: str, default: bool) -> bool:
        """Return the bool stored under *key*, or *default* if absent.

        Raises ValueError when the stored value is not a bool.
        """
        value = self.__settings.get(key, default)
        if isinstance(value, bool):
            return value
        raise ValueError(f"Invalid bool value for {key}: {value}")

    def get_int(self, key: str, default: int) -> int:
        """Return the int stored under *key*, or *default* if absent.

        Booleans are rejected even though ``bool`` subclasses ``int``.
        Raises ValueError when the stored value is not an int.
        """
        value = self.__settings.get(key, default)
        if isinstance(value, int) and not isinstance(value, bool):
            return value
        # Bug fix: this previously read "Invalid str value", copy-pasted
        # from get_str, which made int-type failures misleading.
        raise ValueError(f"Invalid int value for {key}: {value}")

    def get_str(self, key: str, default: str) -> str:
        """Return the str stored under *key*, or *default* if absent.

        Raises ValueError when the stored value is not a str.
        """
        value = self.__settings.get(key, default)
        if isinstance(value, str):
            return value
        raise ValueError(f"Invalid str value for {key}: {value}")

    def get_cache_location(self, key: str) -> "str | None":
        """Return the cache directory stored under *key*.

        A missing key and an empty string both normalize to ``None``.
        Raises ValueError when the stored value is neither str nor None.
        """
        value = self.__settings.get(key)
        if isinstance(value, str) or value is None:
            return value or None
        raise ValueError(f"Invalid cache location value for {key}: {value}")


@dataclass
class Package:
where: str
id: str
name: str
description: str
icon: str
color: str
dependencies: List[Dependency] = field(default_factory=list)
categories: List[Category] = field(default_factory=list)
features: List[Feature] = field(default_factory=list)
settings: List[Setting] = field(default_factory=list)

def add_category(
self,
Expand All @@ -329,12 +415,12 @@ def add_category(
self.categories.append(result)
return result

def add_dependency(
self,
dependency: Dependency,
):
def add_dependency(self, dependency: Dependency):
    # Register a dependency required by this package.
    self.dependencies.append(dependency)

def add_setting(self, setting: Setting):
    # Register a user-configurable setting exposed by this package.
    self.settings.append(setting)

def add_feature(
self,
id: str, # pylint: disable=redefined-builtin
Expand All @@ -348,6 +434,9 @@ def add_feature(
self.features.append(feature)
return feature

def get_settings(self) -> SettingsParser:
    # Fetch this package's settings from the current execution options
    # and wrap them in a typed parser for safe access.
    return SettingsParser(get_execution_options().get_package_settings(self.id))


def _iter_py_files(directory: str):
for root, _, files in os.walk(directory):
Expand Down Expand Up @@ -427,13 +516,17 @@ def add_package(
name: str,
description: str,
dependencies: List[Dependency] | None = None,
icon: str = "BsQuestionCircleFill",
color: str = "#777777",
) -> Package:
return registry.add(
Package(
where=where,
id=id,
name=name,
description=description,
icon=icon,
color=color,
dependencies=dependencies or [],
)
)
11 changes: 4 additions & 7 deletions backend/src/nodes/impl/ncnn/session.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,10 @@

use_gpu = False

from ...utils.exec_options import ExecutionOptions
from .model import NcnnModelWrapper


def create_ncnn_net(
model: NcnnModelWrapper, exec_options: ExecutionOptions
) -> ncnn.Net:
def create_ncnn_net(model: NcnnModelWrapper, gpu_index: int) -> ncnn.Net:
net = ncnn.Net()

if model.fp == "fp16":
Expand All @@ -33,7 +30,7 @@ def create_ncnn_net(
if use_gpu:
# Use vulkan compute
net.opt.use_vulkan_compute = True
net.set_vulkan_device(exec_options.ncnn_gpu_index)
net.set_vulkan_device(gpu_index)

# Load model param and bin
net.load_param_mem(model.model.write_param())
Expand All @@ -51,9 +48,9 @@ def create_ncnn_net(
__session_cache: WeakKeyDictionary[NcnnModelWrapper, ncnn.Net] = WeakKeyDictionary()


def get_ncnn_net(model: NcnnModelWrapper, gpu_index: int) -> ncnn.Net:
    """Return a (possibly cached) ncnn Net for *model*.

    The net is created once per model and memoized in ``__session_cache``
    (a WeakKeyDictionary keyed on the model wrapper, so entries disappear
    when the model is garbage-collected).

    Note: this span in SOURCE interleaved the pre- and post-change diff
    lines (two conflicting signatures); this is the post-change version,
    which takes the Vulkan gpu_index directly instead of ExecutionOptions.
    """
    cached = __session_cache.get(model)
    if cached is None:
        cached = create_ncnn_net(model, gpu_index)
        __session_cache[model] = cached
    return cached
39 changes: 26 additions & 13 deletions backend/src/nodes/impl/onnx/session.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,44 +4,47 @@

import onnxruntime as ort

from ...utils.exec_options import ExecutionOptions
from .model import OnnxModel


def create_inference_session(
    model: OnnxModel,
    gpu_index: int,
    execution_provider: str,
    should_tensorrt_fp16: bool = False,
    tensorrt_cache_path: str | None = None,
) -> ort.InferenceSession:
    """Create an ONNX Runtime inference session for *model*.

    The requested *execution_provider* is placed first in the provider
    list; CUDA (for TensorRT) and CPU are appended as fallbacks so session
    creation still succeeds when the preferred provider is unavailable.

    Note: this span in SOURCE interleaved pre- and post-change diff lines;
    this is the reconstructed post-change version, which takes the
    individual settings instead of an ExecutionOptions object. Engine
    caching is enabled exactly when a tensorrt_cache_path is provided.
    """
    if execution_provider == "TensorrtExecutionProvider":
        providers = [
            (
                "TensorrtExecutionProvider",
                {
                    "device_id": gpu_index,
                    # A cache path implies caching is wanted.
                    "trt_engine_cache_enable": tensorrt_cache_path is not None,
                    "trt_engine_cache_path": tensorrt_cache_path,
                    "trt_fp16_enable": should_tensorrt_fp16,
                },
            ),
            (
                "CUDAExecutionProvider",
                {
                    "device_id": gpu_index,
                },
            ),
            "CPUExecutionProvider",
        ]
    elif execution_provider == "CUDAExecutionProvider":
        providers = [
            (
                "CUDAExecutionProvider",
                {
                    "device_id": gpu_index,
                },
            ),
            "CPUExecutionProvider",
        ]
    else:
        providers = [execution_provider, "CPUExecutionProvider"]

    session = ort.InferenceSession(model.bytes, providers=providers)
    return session
Expand All @@ -53,10 +56,20 @@ def create_inference_session(


def get_onnx_session(
    model: OnnxModel,
    gpu_index: int,
    execution_provider: str,
    should_tensorrt_fp16: bool,
    tensorrt_cache_path: str | None = None,
) -> ort.InferenceSession:
    """Return a (possibly cached) ONNX Runtime session for *model*.

    Sessions are memoized in ``__session_cache`` keyed on the model, so
    repeated calls with the same model reuse the existing session
    regardless of the other arguments.

    Note: this span in SOURCE interleaved pre- and post-change diff lines;
    this is the reconstructed post-change version, which forwards the
    individual settings instead of an ExecutionOptions object.
    """
    cached = __session_cache.get(model)
    if cached is None:
        cached = create_inference_session(
            model,
            gpu_index,
            execution_provider,
            should_tensorrt_fp16,
            tensorrt_cache_path,
        )
        __session_cache[model] = cached
    return cached
30 changes: 0 additions & 30 deletions backend/src/nodes/impl/pytorch/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,40 +6,10 @@
import torch
from torch import Tensor

from ...utils.exec_options import ExecutionOptions
from ..image_utils import as_3d
from ..onnx.np_tensor_utils import MAX_VALUES_BY_DTYPE, np_denorm


def to_pytorch_execution_options(options: ExecutionOptions):
    """Return a copy of *options* with the device resolved for PyTorch.

    Probes available backends in priority order — explicit CPU override,
    then CUDA, then Apple MPS, then DirectML — falling back to CPU.
    The elif order is significant; do not reorder the checks.
    """
    # CPU override
    if options.full_device == "cpu":
        device = "cpu"
    # Check for Nvidia CUDA
    elif torch.cuda.is_available() and torch.cuda.device_count() > 0:
        device = "cuda"
    # Check for Apple MPS
    elif hasattr(torch, "backends") and hasattr(torch.backends, "mps") and torch.backends.mps.is_built() and torch.backends.mps.is_available():  # type: ignore -- older pytorch versions dont support this technically
        device = "mps"
    # Check for DirectML
    elif hasattr(torch, "dml") and torch.dml.is_available():  # type: ignore
        device = "dml"
    else:
        device = "cpu"

    # Rebuild ExecutionOptions, carrying over every per-backend field
    # unchanged and substituting only the resolved device.
    return ExecutionOptions(
        device=device,
        fp16=options.fp16,
        pytorch_gpu_index=options.pytorch_gpu_index,
        ncnn_gpu_index=options.ncnn_gpu_index,
        onnx_gpu_index=options.onnx_gpu_index,
        onnx_execution_provider=options.onnx_execution_provider,
        onnx_should_tensorrt_cache=options.onnx_should_tensorrt_cache,
        onnx_tensorrt_cache_path=options.onnx_tensorrt_cache_path,
        onnx_should_tensorrt_fp16=options.onnx_should_tensorrt_fp16,
    )


def bgr_to_rgb(image: Tensor) -> Tensor:
# flip image channels
# https://github.com/pytorch/pytorch/issues/229
Expand Down
Loading

0 comments on commit 1ec090c

Please sign in to comment.