Skip to content

Commit

Permalink
Fixed settings resetting due to asynchronously queried data not being ready (#2081)
Browse files Browse the repository at this point in the history
  • Loading branch information
RunDevelopment committed Aug 12, 2023
1 parent 10ad7f4 commit 9dabcf5
Showing 1 changed file with 36 additions and 32 deletions.
68 changes: 36 additions & 32 deletions src/renderer/components/SettingsModal.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -376,7 +376,7 @@ const PythonSettings = memo(() => {
const [onnxShouldTensorRtFp16, setOnnxShouldTensorRtFp16] = useOnnxShouldTensorRtFp16;
const isUsingTensorRt = onnxExecutionProvider === 'TensorrtExecutionProvider';

const [nvidiaGpuList, setNvidiaGpuList] = useState<string[]>([]);
const [nvidiaGpuList, setNvidiaGpuList] = useState<string[]>();
useAsyncEffect(
() => ({
supplier: async () => backend.listNvidiaGpus(),
Expand All @@ -386,7 +386,7 @@ const PythonSettings = memo(() => {
);

const [ncnnGPU, setNcnnGPU] = useNcnnGPU;
const [ncnnGpuList, setNcnnGpuList] = useState<string[]>([]);
const [ncnnGpuList, setNcnnGpuList] = useState<string[]>();
useAsyncEffect(
() => ({
supplier: () => backend.listNcnnGpus(),
Expand All @@ -401,8 +401,9 @@ const PythonSettings = memo(() => {
}
}, [isCpu, isFp16, setIsFp16]);

const onnxExecutionProviders = useMemo(
() => [
const onnxExecutionProviders = useMemo(() => {
if (!nvidiaGpuList) return undefined;
return [
...(nvidiaGpuList.length > 0
? [
{
Expand All @@ -423,9 +424,8 @@ const PythonSettings = memo(() => {
},
]
: []),
],
[nvidiaGpuList]
);
];
}, [nvidiaGpuList]);

const onButtonClick = useCallback(async () => {
const fileDir = systemPythonLocation ? path.dirname(systemPythonLocation) : lastDirectory;
Expand Down Expand Up @@ -558,7 +558,7 @@ const PythonSettings = memo(() => {
}}
/>

{!isArmMac && (
{!isArmMac && nvidiaGpuList !== undefined && (
<Dropdown
description="Which GPU to use for PyTorch."
isDisabled={nvidiaGpuList.length === 0}
Expand All @@ -582,29 +582,31 @@ const PythonSettings = memo(() => {
divider={<StackDivider />}
w="full"
>
<Dropdown
description="Which GPU to use for NCNN."
isDisabled={isArmMac ? true : ncnnGpuList.length === 0}
options={
ncnnGpuList.length === 0
? [{ label: 'No supported GPU found', value: -1 }]
: ncnnGpuList.map((gpu, i) => ({
label: `${i}: ${gpu}`,
value: i,
}))
}
title="NCNN GPU"
value={ncnnGPU}
onChange={setNcnnGPU}
/>
{ncnnGpuList !== undefined && (
<Dropdown
description="Which GPU to use for NCNN."
isDisabled={isArmMac ? true : ncnnGpuList.length === 0}
options={
ncnnGpuList.length === 0
? [{ label: 'No supported GPU found', value: -1 }]
: ncnnGpuList.map((gpu, i) => ({
label: `${i}: ${gpu}`,
value: i,
}))
}
title="NCNN GPU"
value={ncnnGPU}
onChange={setNcnnGPU}
/>
)}
</VStack>
</TabPanel>
<TabPanel px={0}>
<VStack
divider={<StackDivider />}
w="full"
>
{!isArmMac && (
{!isArmMac && nvidiaGpuList !== undefined && (
<Dropdown
description="Which GPU to use for ONNX."
isDisabled={nvidiaGpuList.length === 0}
Expand All @@ -621,14 +623,16 @@ const PythonSettings = memo(() => {
onChange={setOnnxGPU}
/>
)}
<Dropdown
description="What provider to use for ONNX."
isDisabled={isArmMac}
options={onnxExecutionProviders}
title="ONNX Execution Provider"
value={onnxExecutionProvider}
onChange={setOnnxExecutionProvider}
/>
{onnxExecutionProviders && (
<Dropdown
description="What provider to use for ONNX."
isDisabled={isArmMac}
options={onnxExecutionProviders}
title="ONNX Execution Provider"
value={onnxExecutionProvider}
onChange={setOnnxExecutionProvider}
/>
)}
{isUsingTensorRt && (
<HStack>
<Toggle
Expand Down

0 comments on commit 9dabcf5

Please sign in to comment.