add a UI setting for the amount of RAM reserved for the system
this limits the available memory for PyTorch upscaling
stonerl committed Aug 14, 2023
1 parent dda39bc commit 73d682c
Showing 9 changed files with 130 additions and 21 deletions.
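
This commit threads one number, the megabytes of RAM to keep free for the rest of the system, from a renderer setting through the backend's ExecutionOptions and into the PyTorch tile-size estimator. As a minimal standalone sketch of the resulting budget rule (the function name is hypothetical; the clamp mirrors the arm Mac branch added in the upscale node below):

```python
import psutil  # the commit itself imports psutil in the upscale node


def upscale_memory_budget(reserved_system_memory_mb: int) -> int:
    """Bytes that PyTorch upscaling may use once the reservation is honored."""
    total = psutil.virtual_memory().total
    reserved = reserved_system_memory_mb * (1024**2)  # MB -> bytes
    pre_budget = total - reserved
    # Clamp: never drop below 20% of total RAM, never exceed 80% of it.
    return int(max(total * 0.2, min(pre_budget, total * 0.8)))
```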
1 change: 1 addition & 0 deletions backend/src/nodes/impl/pytorch/utils.py
@@ -37,6 +37,7 @@ def to_pytorch_execution_options(options: ExecutionOptions):
onnx_should_tensorrt_cache=options.onnx_should_tensorrt_cache,
onnx_tensorrt_cache_path=options.onnx_tensorrt_cache_path,
onnx_should_tensorrt_fp16=options.onnx_should_tensorrt_fp16,
reserved_system_memory=options.reserved_system_memory,
)


19 changes: 17 additions & 2 deletions backend/src/nodes/utils/exec_options.py
@@ -16,6 +16,7 @@ def __init__(
onnx_should_tensorrt_cache: bool,
onnx_tensorrt_cache_path: str,
onnx_should_tensorrt_fp16: bool,
reserved_system_memory: int,
) -> None:
self.__device = device
self.__fp16 = fp16
@@ -26,6 +27,7 @@ def __init__(
self.__onnx_should_tensorrt_cache = onnx_should_tensorrt_cache
self.__onnx_tensorrt_cache_path = onnx_tensorrt_cache_path
self.__onnx_should_tensorrt_fp16 = onnx_should_tensorrt_fp16
self.__reserved_system_memory = reserved_system_memory

if (
not os.path.exists(onnx_tensorrt_cache_path)
@@ -34,7 +36,14 @@ def __init__(
os.makedirs(onnx_tensorrt_cache_path)

logger.debug(
f"PyTorch execution options: fp16: {fp16}, device: {self.full_device} | NCNN execution options: gpu_index: {ncnn_gpu_index} | ONNX execution options: gpu_index: {onnx_gpu_index}, execution_provider: {onnx_execution_provider}, should_tensorrt_cache: {onnx_should_tensorrt_cache}, tensorrt_cache_path: {onnx_tensorrt_cache_path}, should_tensorrt_fp16: {onnx_should_tensorrt_fp16}"
f"PyTorch execution options: fp16: {fp16}, device: {self.full_device} |"
f" NCNN execution options: gpu_index: {ncnn_gpu_index} | ONNX execution"
f" options: gpu_index: {onnx_gpu_index}, execution_provider:"
f" {onnx_execution_provider}, should_tensorrt_cache:"
f" {onnx_should_tensorrt_cache}, tensorrt_cache_path:"
f" {onnx_tensorrt_cache_path}, should_tensorrt_fp16:"
f" {onnx_should_tensorrt_fp16}, reserved_system_memory:"
f" {reserved_system_memory}"
)

@property
@@ -75,9 +84,13 @@ def onnx_tensorrt_cache_path(self):
def onnx_should_tensorrt_fp16(self):
return self.__onnx_should_tensorrt_fp16

@property
def reserved_system_memory(self):
return self.__reserved_system_memory


__global_exec_options = ExecutionOptions(
"cpu", False, 0, 0, 0, "CPUExecutionProvider", False, "", False
"cpu", False, 0, 0, 0, "CPUExecutionProvider", False, "", False, 1024
)


@@ -102,6 +115,7 @@ class JsonExecutionOptions(TypedDict):
onnxShouldTensorRtCache: bool
onnxTensorRtCachePath: str
onnxShouldTensorRtFp16: bool
reservedSystemMemory: int


def parse_execution_options(json: JsonExecutionOptions) -> ExecutionOptions:
@@ -115,4 +129,5 @@ def parse_execution_options(json: JsonExecutionOptions) -> ExecutionOptions:
onnx_should_tensorrt_cache=json["onnxShouldTensorRtCache"],
onnx_tensorrt_cache_path=json["onnxTensorRtCachePath"],
onnx_should_tensorrt_fp16=json["onnxShouldTensorRtFp16"],
reserved_system_memory=json["reservedSystemMemory"],
)
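
A quick sketch of the new property round-trip, using the module's own global default shown above (the trailing 1024 is the new reserved_system_memory argument, in MB):

```python
# Assumes ExecutionOptions is imported from backend/src/nodes/utils/exec_options.py.
options = ExecutionOptions(
    "cpu", False, 0, 0, 0, "CPUExecutionProvider", False, "", False, 1024
)
assert options.reserved_system_memory == 1024  # exposed via the new @property
```

In the renderer-to-backend path, the same value travels as the camelCase JSON key reservedSystemMemory and is mapped back by parse_execution_options.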
@@ -3,9 +3,8 @@
from typing import Tuple

import numpy as np
import psutil
import torch
from sanic.log import logger

from nodes.impl.pytorch.auto_split import pytorch_auto_split
from nodes.impl.pytorch.types import PyTorchSRModel
from nodes.impl.pytorch.utils import to_pytorch_execution_options
@@ -20,6 +19,8 @@
from nodes.properties.outputs import ImageOutput
from nodes.utils.exec_options import ExecutionOptions, get_execution_options
from nodes.utils.utils import get_h_w_c
from sanic.log import logger
from system import is_arm_mac

from .. import processing_group

@@ -39,13 +40,30 @@ def upscale(
device = torch.device(options.full_device)

def estimate():
element_size = 2 if use_fp16 else 4
model_bytes = sum(p.numel() * element_size for p in model.parameters())

if "cuda" in options.full_device:
mem_info: Tuple[int, int] = torch.cuda.mem_get_info(device) # type: ignore
free, _total = mem_info
element_size = 2 if use_fp16 else 4
model_bytes = sum(p.numel() * element_size for p in model.parameters())
budget = int(free * 0.8)

return MaxTileSize(
estimate_tile_size(
budget,
model_bytes,
img,
element_size,
)
)

if is_arm_mac:
total_memory = psutil.virtual_memory().total
reserved_system_memory = options.reserved_system_memory * (1024**2)
pre_budget = int(total_memory - reserved_system_memory)

budget = max(total_memory * 0.2, min(pre_budget, total_memory * 0.8))

return MaxTileSize(
estimate_tile_size(
budget,
@@ -71,19 +89,27 @@ def estimate():
@processing_group.register(
schema_id="chainner:pytorch:upscale_image",
name="Upscale Image",
description="Upscales an image using a PyTorch Super-Resolution model. \
Select a manual number of tiles if you are having issues with the automatic mode. ",
description=(
"Upscales an image using a PyTorch Super-Resolution model. Select a manual"
" number of tiles if you are having issues with the automatic mode. "
),
icon="PyTorch",
inputs=[
ImageInput().with_id(1),
SrModelInput().with_id(0),
TileSizeDropdown()
.with_id(2)
.with_docs(
"Tiled upscaling is used to allow large images to be upscaled without hitting memory limits.",
"This works by splitting the image into tiles (with overlap), upscaling each tile individually, and seamlessly recombining them.",
"Generally it's recommended to use the largest tile size possible for best performance (with the ideal scenario being no tiling at all), but depending on the model and image size, this may not be possible.",
"If you are having issues with the automatic mode, you can manually select a tile size. Sometimes, a manually selected tile size may be faster than what the automatic mode picks.",
"Tiled upscaling is used to allow large images to be upscaled without"
" hitting memory limits.",
"This works by splitting the image into tiles (with overlap), upscaling"
" each tile individually, and seamlessly recombining them.",
"Generally it's recommended to use the largest tile size possible for best"
" performance (with the ideal scenario being no tiling at all), but"
" depending on the model and image size, this may not be possible.",
"If you are having issues with the automatic mode, you can manually select"
" a tile size. Sometimes, a manually selected tile size may be faster than"
" what the automatic mode picks.",
hint=True,
),
],
@@ -111,7 +137,8 @@ def upscale_image_node(
scale = model.scale
h, w, c = get_h_w_c(img)
logger.debug(
f"Upscaling a {h}x{w}x{c} image with a {scale}x model (in_nc: {in_nc}, out_nc: {out_nc})"
f"Upscaling a {h}x{w}x{c} image with a {scale}x model (in_nc: {in_nc}, out_nc:"
f" {out_nc})"
)

return convenient_upscale(
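To make the arm Mac branch concrete, a worked example of the clamp with hypothetical numbers (16 GiB of total RAM):

```python
def budget_bytes(total: int, reserved_mb: int) -> int:
    # Same clamp as estimate() above: keep the budget within 20%..80% of total RAM.
    pre_budget = total - reserved_mb * (1024**2)
    return int(max(total * 0.2, min(pre_budget, total * 0.8)))


GiB = 1024**3
total = 16 * GiB
print(budget_bytes(total, 8192) / GiB)   # 8.0   -> the reservation is honored
print(budget_bytes(total, 15000) / GiB)  # ~3.2  -> floor: 20% of RAM stays usable for upscaling
print(budget_bytes(total, 1024) / GiB)   # ~12.8 -> cap: at most 80% of RAM is ever budgeted
```

The CUDA branch is unaffected by the setting: it still budgets 80% of the free VRAM reported by torch.cuda.mem_get_info.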
1 change: 1 addition & 0 deletions src/common/Backend.ts
@@ -77,6 +77,7 @@ export interface BackendExecutionOptions {
onnxShouldTensorRtCache: boolean;
onnxTensorRtCachePath: string;
onnxShouldTensorRtFp16: boolean;
reservedSystemMemory: number;
}
export interface BackendRunRequest {
data: BackendJsonNode[];
2 changes: 2 additions & 0 deletions src/common/env.ts
@@ -31,3 +31,5 @@ export const sanitizedEnv = env;
export const getOnnxTensorRtCacheLocation = (userDataPath: string) => {
return path.join(userDataPath, 'onnx-tensorrt-cache');
};

export const totalMemory = os.totalmem();
1 change: 1 addition & 0 deletions src/main/cli/run.ts
@@ -138,6 +138,7 @@ const getExecutionOptions = (): BackendExecutionOptions => {
onnxShouldTensorRtCache: getSetting('onnx-should-tensorrt-cache', false),
onnxTensorRtCachePath: getOnnxTensorRtCacheLocation(app.getPath('userData')),
onnxShouldTensorRtFp16: getSetting('onnx-should-tensorrt-fp16', false),
reservedSystemMemory: getSetting('reserved-system-memory', 0),
};
};

67 changes: 59 additions & 8 deletions src/renderer/components/SettingsModal.tsx
@@ -8,6 +8,7 @@ import {
Input,
InputGroup,
InputLeftElement,
InputRightElement,
Modal,
ModalBody,
ModalCloseButton,
@@ -47,7 +48,7 @@ import {
import { BsFillPencilFill, BsPaletteFill } from 'react-icons/bs';
import { FaPython, FaTools } from 'react-icons/fa';
import { useContext } from 'use-context-selector';
import { getOnnxTensorRtCacheLocation, hasTensorRt, isArmMac } from '../../common/env';
import { getOnnxTensorRtCacheLocation, hasTensorRt, isArmMac, totalMemory } from '../../common/env';
import { log } from '../../common/log';
import { ipcRenderer } from '../../common/safeIpc';
import { BackendContext } from '../contexts/BackendContext';
@@ -270,7 +271,7 @@ const AppearanceSettings = memo(() => {
});

const EnvironmentSettings = memo(() => {
const { useStartupTemplate } = useContext(SettingsContext);
const { useStartupTemplate, useReservedSystemMemory } = useContext(SettingsContext);

const [startupTemplate, setStartupTemplate] = useStartupTemplate;

@@ -297,11 +298,55 @@
}
}, [startupTemplate, lastDirectory, setStartupTemplate]);

const [reservedSystemMemory, setReservedSystemMemory] = useReservedSystemMemory;

// Maximum amount reserved for the system is 80% of the total memory
const calculateMaxValue = () => (totalMemory / 1024 ** 2) * 0.8;
// Minimum amount reserved for the system is 20% of the total memory
const calculateMinValue = () => (totalMemory / 1024 ** 2) * 0.2;

return (
<VStack
divider={<StackDivider />}
w="full"
>
{isArmMac ? (
<SettingsItem
description={`The amount of RAM reserved for the system, which chaiNNer will not use for upscaling. (~${
calculateMinValue() / 1024
} GB at minimum and ~${calculateMaxValue() / 1024} GB at maximum.)`}
title="Reserved System Memory"
>
<InputGroup>
<NumberInput
max={calculateMaxValue()}
min={calculateMinValue()}
step={512}
value={reservedSystemMemory}
width={188}
onChange={(number: string) => {
const value = Number(number);

if (!Number.isNaN(value)) {
setReservedSystemMemory(value);
}
}}
>
<NumberInputField
paddingRight="3.7rem"
textAlign="right"
/>
<InputRightElement marginRight="1.5rem">MB</InputRightElement>
<NumberInputStepper>
<NumberIncrementStepper />
<NumberDecrementStepper />
</NumberInputStepper>
</NumberInput>
</InputGroup>
</SettingsItem>
) : (
[]
)}
<SettingsItem
description="Set a chain template to use by default when chaiNNer starts up."
title="Startup Template"
@@ -332,14 +377,20 @@ const EnvironmentSettings = memo(() => {
// eslint-disable-next-line @typescript-eslint/no-misused-promises
onClick={onButtonClick}
/>

<InputRightElement
width="2.5rem"
zIndex={1}
>
<IconButton
aria-label="clear"
icon={<SmallCloseIcon />}
size="xs"
onClick={() => setStartupTemplate('')}
/>
</InputRightElement>
</InputGroup>
</Tooltip>
<IconButton
aria-label="clear"
icon={<SmallCloseIcon />}
size="xs"
onClick={() => setStartupTemplate('')}
/>
</HStack>
</SettingsItem>
<Text>Looking for the CPU and FP16 settings? They moved to the Python tab.</Text>
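For reference, the bounds this input enforces on a hypothetical 16 GiB machine (same arithmetic as calculateMinValue/calculateMaxValue above, sketched in Python for brevity):

```python
total_memory = 16 * 1024**3               # bytes, as os.totalmem() would report
min_mb = (total_memory / 1024**2) * 0.2   # 3276.8  -> "~3.2 GB at minimum"
max_mb = (total_memory / 1024**2) * 0.8   # 13107.2 -> "~12.8 GB at maximum"
step_mb = 512                             # the stepper moves in 512 MB increments
```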
8 changes: 8 additions & 0 deletions src/renderer/contexts/SettingsContext.tsx
@@ -2,6 +2,7 @@ import { useColorMode } from '@chakra-ui/react';
import React, { memo, useEffect } from 'react';
import { createContext } from 'use-context-selector';
import { SchemaId } from '../../common/common-types';
import { totalMemory } from '../../common/env';
import { GetSetState, SetState } from '../helpers/types';
import { useLocalStorage } from '../hooks/useLocalStorage';
import { useMemoArray, useMemoObject } from '../hooks/useMemo';
@@ -26,6 +27,7 @@ interface Settings {
setSnapToGridAmount: SetState<number>
];
useStartupTemplate: GetSetState<string>;
useReservedSystemMemory: GetSetState<number>;
useSelectTheme: GetSetState<string>;
useAnimateChain: GetSetState<boolean>;
useExperimentalFeatures: GetSetState<boolean>;
@@ -37,6 +39,8 @@ interface Settings {
useNodeSelectorCollapsed: GetSetState<boolean>;
}

const calculatedReservedMemory = () => (totalMemory / 1024 ** 2) * 0.5;

// TODO: create context requires default values
export const SettingsContext = createContext<Readonly<Settings>>({} as Settings);

@@ -65,6 +69,9 @@ export const SettingsProvider = memo(({ children }: React.PropsWithChildren<unkn
const useStartupTemplate = useMemoArray(useLocalStorage('startup-template', ''));

const useSelectTheme = useMemoArray(useLocalStorage('theme', 'dark'));
const useReservedSystemMemory = useMemoArray(
useLocalStorage('reserved-system-memory', calculatedReservedMemory())
);

const { setColorMode } = useColorMode();
const [selectThemeColor] = useSelectTheme;
@@ -116,6 +123,7 @@ export const SettingsProvider = memo(({ children }: React.PropsWithChildren<unkn
useSnapToGrid,
useCheckUpdOnStrtUp,
useStartupTemplate,
useReservedSystemMemory,
useSelectTheme,
useAnimateChain,
useExperimentalFeatures,
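The stored default is half of total memory, placing a fresh install midway between the renderer's 20% floor and 80% cap; on the same hypothetical 16 GiB machine:

```python
total_memory = 16 * 1024**3                  # bytes
default_mb = (total_memory / 1024**2) * 0.5  # 8192.0 MB reserved out of the box
```

Note that the CLI path in run.ts above falls back to 0 when the setting is absent, while the renderer seeds this 50% default.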
3 changes: 3 additions & 0 deletions src/renderer/hooks/useBackendExecutionOptions.ts
@@ -16,6 +16,7 @@ export const useBackendExecutionOptions = (): BackendExecutionOptions => {
useOnnxExecutionProvider,
useOnnxShouldTensorRtCache,
useOnnxShouldTensorRtFp16,
useReservedSystemMemory,
} = useContext(SettingsContext);

const [isCpu] = useIsCpu;
@@ -37,6 +38,7 @@ export const useBackendExecutionOptions = (): BackendExecutionOptions => {
}, []);

const [onnxShouldTensorRtFp16] = useOnnxShouldTensorRtFp16;
const [reservedSystemMemory] = useReservedSystemMemory;

return {
isCpu,
@@ -48,5 +50,6 @@ export const useBackendExecutionOptions = (): BackendExecutionOptions => {
onnxShouldTensorRtCache,
onnxTensorRtCachePath,
onnxShouldTensorRtFp16,
reservedSystemMemory,
};
};
