Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
53 commits
Select commit Hold shift + click to select a range
3b23ad0
Fix nifti transform failing at cleanup previous stage
aristizabal95 Jan 19, 2024
7da8400
add tests
hasan7n Jan 25, 2024
7d5734b
Merge branch 'separate-stages' of https://github.com/aristizabal95/Fe…
aristizabal95 Jan 25, 2024
5b4ea40
Merge branch 'fets_2.0' of https://github.com/aristizabal95/FeTS-AI-F…
aristizabal95 Jan 25, 2024
2455563
modify dockerfile.dev
hasan7n Jan 26, 2024
ad03a74
use absolute paths to support singularity
hasan7n Jan 26, 2024
99a2c73
modify env vars to support singularity
hasan7n Jan 26, 2024
7efa53a
define runtime env vars in one place
hasan7n Jan 26, 2024
ce7b8fe
add singularity tests
hasan7n Jan 26, 2024
6cabe3f
Merge remote-tracking branch 'origin/separate-stages' into separate-s…
hasan7n Jan 26, 2024
448386f
Merge branch 'separate-stages' of https://github.com/aristizabal95/Fe…
aristizabal95 Jan 26, 2024
8b93a14
Write RANO Federation instructions
aristizabal95 Jan 26, 2024
b83904b
Handle IndexError for non-prep niftis
aristizabal95 Jan 31, 2024
987daf1
Move model execution to subprocesses to eliminate memleak
aristizabal95 Feb 16, 2024
1ef8939
Fix issue not catching invalid niftis
aristizabal95 Feb 16, 2024
3df8792
Merge pull request #4 from aristizabal95/memleak-fixes
aristizabal95 Feb 16, 2024
5c6e364
Use versioned MLCubes
aristizabal95 Feb 17, 2024
559d7ba
Return the total count of negative values
aristizabal95 Feb 17, 2024
9d9b9c9
Merge branch 'separate-stages' of https://github.com/aristizabal95/Fe…
aristizabal95 Feb 17, 2024
f58aefa
Revert "Move model execution to subprocesses to eliminate memleak"
aristizabal95 Feb 18, 2024
7ea221d
Move memleaking functions into subprocesses
aristizabal95 Feb 18, 2024
a59572a
Pass input masks to img gen wrapper
aristizabal95 Feb 18, 2024
a6412bf
udpate mlcube to latest version
aristizabal95 Feb 19, 2024
9988405
Use torch with cuda 11.8
aristizabal95 Feb 19, 2024
8284b60
Merge branch 'release' into memleak-fixes
aristizabal95 Feb 19, 2024
24788a9
Merge pull request #5 from aristizabal95/memleak-fixes
aristizabal95 Feb 19, 2024
0ba6a9d
Fix issues with release version
aristizabal95 Feb 26, 2024
ad6e3b2
Merge branch 'fets_2.0' of https://github.com/FeTS-AI/Front-End into …
aristizabal95 Feb 28, 2024
067c6de
Move env vars declaration to prepare command
aristizabal95 Mar 4, 2024
ec3eb6f
Move adjusatble tempdir logic to utilities
aristizabal95 Mar 4, 2024
c3f7077
Generate debugging logs from BratsPipeline
aristizabal95 Mar 4, 2024
af5ecb3
Update mlcube image to 1.0.2
aristizabal95 Mar 4, 2024
264e3fb
Filter out files on subject-tp folder for modality num check
aristizabal95 Mar 5, 2024
6f3cbef
Add versions to requirements
aristizabal95 Mar 14, 2024
2fcbac3
Remove assertion error from sanity check
aristizabal95 Mar 14, 2024
bdd6bc9
Make sure print happens on failed sanity check
aristizabal95 Mar 14, 2024
ed952b8
Update mlcube image to 1.0.3
aristizabal95 Mar 14, 2024
47026d9
Fix error if subject output path already existed
aristizabal95 Mar 15, 2024
e32edff
Handle status corruption
aristizabal95 Mar 21, 2024
1e2bcb0
Provide more details on missing/extra modalities
aristizabal95 Mar 27, 2024
07dc092
Implement artifact cleanup to reduce storage consumption
aristizabal95 May 7, 2024
373214a
Fix permission denied when overwriting extract contents
aristizabal95 May 15, 2024
7ddf3b8
Update data prep mlcube to 1.0.5
aristizabal95 May 15, 2024
06f9181
Add verbosity to report generation. Fix lost progress issue
aristizabal95 Jun 11, 2024
1e3ac0c
Remove traceback for sanity check failure
aristizabal95 Jun 11, 2024
6afc374
Update container version
aristizabal95 Jun 11, 2024
04b25cb
Use temporary files for report writes
aristizabal95 Jul 2, 2024
0f9ae38
Write to a normal temporary file instead of tempfile
aristizabal95 Jul 2, 2024
8b8fa6f
Increase version number
aristizabal95 Jul 2, 2024
1c22016
Provide more verbose stdout for identifying errors
aristizabal95 Jul 30, 2024
b2fcf6e
Handle cases where next stage is None
aristizabal95 Jul 31, 2024
2b48703
update image version
aristizabal95 Jul 31, 2024
2c846dc
Display contents of finalized folder on error
aristizabal95 Aug 28, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 3 additions & 6 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@ LABEL authors="FeTS_Admin <admin@fets.ai>"
RUN apt-get update && apt-get update --fix-missing && apt-get install -y libnss3 libnspr4 libxcursor1 libxcursor-dev libasound2 libdbus-1-dev libglfw3-dev libgles2-mesa-dev ffmpeg libsm6 libxext6 python3.8 python3.8-venv python3.8-dev python3-setuptools

ENV PATH=/CaPTk/bin/qt/5.12.1/bin:/CaPTk/bin/qt/5.12.1/libexec:$PATH
ENV CMAKE_PREFIX_PATH=/CaPTk/bin/ITK-build:/CaPTk/bin/DCMTK-build:/CaPTk/bin/qt/5.12.1/lib/cmake/Qt5:$CMAKE_PREFIX_PATH

ENV CMAKE_PREFIX_PATH=/CaPTk/bin/ITK-build:/CaPTk/bin/DCMTK-build:/CaPTk/bin/qt/5.12.1/lib/cmake/Qt5
ENV DCMTK_DIR=/CaPTk/bin/DCMTK-build
RUN pwd && ls -l

WORKDIR /Front-End
Expand Down Expand Up @@ -88,13 +88,10 @@ RUN pip install git+https://github.com/mlcommons/GaNDLF@616b37bafad8f89d5c816a88
# setup a separate env for nnunet
RUN python -m venv /nnunet_env && /nnunet_env/bin/pip install --upgrade pip

RUN /nnunet_env/bin/pip install torch==1.12.1+cu102 torchvision==0.13.1+cu102 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu102
RUN /nnunet_env/bin/pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118

RUN /nnunet_env/bin/pip install git+https://github.com/MIC-DKFZ/nnUNet.git@nnunetv1

ENV nnUNet_raw_data_base="/tmp/nnUNet_raw_data_base"
ENV nnUNet_preprocessed="/tmp/nnUNet_preprocessed"
# see https://docs.docker.com/config/containers/resource_constraints/#gpu for detailed explanation
ENV CUDA_VISIBLE_DEVICES="0"

COPY ./mlcubes/data_preparation/project /project
Expand Down
Binary file added docs/assets/img/rano_docker.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
3 changes: 2 additions & 1 deletion mlcubes/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,5 @@
*/mlcube/workspace/*
!requirements.txt
!*/mlcube/workspace/parameters.yaml
models
models
tmpmodel
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
tmpmodel
tmpmodel

5 changes: 5 additions & 0 deletions mlcubes/data_preparation/mlcube/clean.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#!/bin/bash
# Remove all workspace artifacts generated by previous MLCube runs,
# leaving the workspace ready for a fresh prepare/sanity_check/statistics cycle.
rm -rf workspace/data \
       workspace/labels \
       workspace/metadata \
       workspace/report \
       workspace/statistics
2 changes: 1 addition & 1 deletion mlcubes/data_preparation/mlcube/mlcube.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ platform:

docker:
# Image name
image: mlcommons/rano-data-prep-mlcube:latest
image: mlcommons/rano-data-prep-mlcube:1.0.10
# Docker build context relative to $MLCUBE_ROOT. Default is `build`.
build_context: "../project"
# Docker file name within docker build context, default is `Dockerfile`.
Expand Down
63 changes: 63 additions & 0 deletions mlcubes/data_preparation/mlcube/tests.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@

DATA=./workspace/data

run() {
mlcube run --mlcube ./mlcube.yaml --task prepare --network=none --mount=ro \
report_file=report/report.yaml \
labels_path=input_data \
-Pdocker.cpu_args="-u $(id -u):$(id -g)" \
-Pdocker.gpu_args="-u $(id -u):$(id -g)"
}

run_other() {
mlcube run --mlcube ./mlcube.yaml --task sanity_check --network=none --mount=ro \
-Pdocker.cpu_args="-u $(id -u):$(id -g)" \
-Pdocker.gpu_args="-u $(id -u):$(id -g)"

mlcube run --mlcube ./mlcube.yaml --task statistics --network=none --mount=ro \
output_path=statistics/statistics.yaml \
-Pdocker.cpu_args="-u $(id -u):$(id -g)" \
-Pdocker.gpu_args="-u $(id -u):$(id -g)"
}

STARTTIME=$(date +%s.%N)


run

# manual review
cp $DATA/tumor_extracted/DataForQC/AAAC_1/2008.03.31/TumorMasksForQC/AAAC_1_2008.03.31_tumorMask_model_0.nii.gz \
$DATA/tumor_extracted/DataForQC/AAAC_1/2008.03.31/TumorMasksForQC/finalized/AAAC_1_2008.03.31_tumorMask_model_0.nii.gz

cp $DATA/tumor_extracted/DataForQC/AAAC_1/2012.01.02/TumorMasksForQC/AAAC_1_2012.01.02_tumorMask_model_0.nii.gz \
$DATA/tumor_extracted/DataForQC/AAAC_1/2012.01.02/TumorMasksForQC/finalized/AAAC_1_2012.01.02_tumorMask_model_0.nii.gz

cp $DATA/tumor_extracted/DataForQC/AAAC_2/2001.01.01/TumorMasksForQC/AAAC_2_2001.01.01_tumorMask_model_0.nii.gz \
$DATA/tumor_extracted/DataForQC/AAAC_2/2001.01.01/TumorMasksForQC/finalized/AAAC_2_2001.01.01_tumorMask_model_0.nii.gz
# end manual review

run &
PID=$!

# prompt response
BREAK=0
while [ $BREAK -eq "0" ]
do
if [ -f $DATA/".prompt.txt" ];
then BREAK=1;
else
sleep 0.1s;
fi

done

echo -n "y" >> $DATA/.response.txt
# end prompt response

wait ${PID}

ENDTIME=$(date +%s.%N)
DIFF=$(echo "$ENDTIME - $STARTTIME" | bc)
echo $DIFF

run_other
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
run_other
run_other

62 changes: 62 additions & 0 deletions mlcubes/data_preparation/mlcube/tests_sing.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@

DATA=./workspace/data

run() {
mlcube run --mlcube ./mlcube.yaml --task prepare --network=none --mount=ro --platform=singularity \
report_file=report/report.yaml \
labels_path=input_data \
-Psingularity.run_args="-nce"
}

run_other() {
mlcube run --mlcube ./mlcube.yaml --task sanity_check --network=none --mount=ro --platform=singularity \
-Psingularity.run_args="-nce"


mlcube run --mlcube ./mlcube.yaml --task statistics --network=none --mount=ro --platform=singularity \
output_path=statistics/statistics.yaml \
-Psingularity.run_args="-nce"

}

STARTTIME=$(date +%s.%N)


run

# manual review
cp $DATA/tumor_extracted/DataForQC/AAAC_1/2008.03.31/TumorMasksForQC/AAAC_1_2008.03.31_tumorMask_model_0.nii.gz \
$DATA/tumor_extracted/DataForQC/AAAC_1/2008.03.31/TumorMasksForQC/finalized/AAAC_1_2008.03.31_tumorMask_model_0.nii.gz

cp $DATA/tumor_extracted/DataForQC/AAAC_1/2012.01.02/TumorMasksForQC/AAAC_1_2012.01.02_tumorMask_model_0.nii.gz \
$DATA/tumor_extracted/DataForQC/AAAC_1/2012.01.02/TumorMasksForQC/finalized/AAAC_1_2012.01.02_tumorMask_model_0.nii.gz

cp $DATA/tumor_extracted/DataForQC/AAAC_2/2001.01.01/TumorMasksForQC/AAAC_2_2001.01.01_tumorMask_model_0.nii.gz \
$DATA/tumor_extracted/DataForQC/AAAC_2/2001.01.01/TumorMasksForQC/finalized/AAAC_2_2001.01.01_tumorMask_model_0.nii.gz
# end manual review

run &
PID=$!

# prompt response
BREAK=0
while [ $BREAK -eq "0" ]
do
if [ -f $DATA/".prompt.txt" ];
then BREAK=1;
else
sleep 0.1s;
fi

done

echo -n "y" >> $DATA/.response.txt
# end prompt response

wait ${PID}

ENDTIME=$(date +%s.%N)
DIFF=$(echo "$ENDTIME - $STARTTIME" | bc)
echo $DIFF

run_other
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
run_other
run_other

17 changes: 17 additions & 0 deletions mlcubes/data_preparation/project/Dockerfile.dev
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
FROM hasan7/baselocal:0.0.0

# Test-only assets: a downsampled atlas image and a lightweight dummy model.
COPY ./atlasImage_0.125.nii.gz /project
COPY ./tmpmodel /project

# use a downsampled reference image for DICOM to NIFTI conversion
RUN mv /project/atlasImage_0.125.nii.gz /Front-End/bin/install/appdir/usr/data/sri24/atlasImage.nii.gz

# Swap the heavy brain extraction models for dummy ones so tests run fast.
# Done in a single layer so the removed model weights don't persist in an
# intermediate image layer.
RUN rm -rf /project/stages/data_prep_models/brain_extraction/model_0/ \
           /project/stages/data_prep_models/brain_extraction/model_1/ && \
    cp -r /project/tmpmodel /project/stages/data_prep_models/brain_extraction/model_0 && \
    mv /project/tmpmodel /project/stages/data_prep_models/brain_extraction/model_1

ENTRYPOINT ["python", "/project/mlcube.py"]
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
ENTRYPOINT ["python", "/project/mlcube.py"]
ENTRYPOINT ["python", "/project/mlcube.py"]

12 changes: 12 additions & 0 deletions mlcubes/data_preparation/project/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# How to run tests

1. Download and extract (sha256: 701fbba8b253fc5b2f54660837c493a38dec986df9bdbf3d97f07c8bc276a965):
<https://storage.googleapis.com/medperf-storage/rano_test_assets/dev.tar.gz>

2. Move `additional_files` and `input_data` to the mlcube workspace
3. Move `tmpmodel` and `atlasImage_0.125.nii.gz` to the mlcube project folder

4. Build the base docker image from the repo's root folder Dockerfile
5. Build the dev docker image using `Dockerfile.dev` in the mlcube project folder.
6. Then change the docker image name in `mlcube.yaml` according to step 5.
7. Then go to the `mlcube` folder and run the test scripts
17 changes: 9 additions & 8 deletions mlcubes/data_preparation/project/mlcube.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
"""MLCube handler file"""
import os
import typer
import subprocess

import shutil

app = typer.Typer()


def exec_python(cmd: str) -> None:
def exec_python(cmd: str, check_for_failure=True) -> None:
"""Execute a python script as a subprocess

Args:
Expand All @@ -15,7 +16,8 @@ def exec_python(cmd: str) -> None:
splitted_cmd = cmd.split()
process = subprocess.Popen(splitted_cmd, cwd=".")
process.wait()
assert process.returncode == 0, f"command failed: {cmd}"
if check_for_failure:
assert process.returncode == 0, f"command failed: {cmd}"


@app.command("prepare")
Expand All @@ -29,10 +31,9 @@ def prepare(
report_file: str = typer.Option(..., "--report_file"),
metadata_path: str = typer.Option(..., "--metadata_path"),
):
cmd = f"python3 project/prepare.py --data_path={data_path} --labels_path={labels_path} --models_path={models_path} --data_out={output_path} --labels_out={output_labels_path} --report={report_file} --parameters={parameters_file} --metadata_path={metadata_path}"
cmd = f"python3 /project/prepare.py --data_path={data_path} --labels_path={labels_path} --models_path={models_path} --data_out={output_path} --labels_out={output_labels_path} --report={report_file} --parameters={parameters_file} --metadata_path={metadata_path}"
exec_python(cmd)


@app.command("sanity_check")
def sanity_check(
data_path: str = typer.Option(..., "--data_path"),
Expand All @@ -41,7 +42,7 @@ def sanity_check(
metadata_path: str = typer.Option(..., "--metadata_path"),
):
# Modify the sanity_check command as needed
cmd = f"python3 project/sanity_check.py --data_path={data_path} --labels_path={labels_path} --metadata={metadata_path}"
cmd = f"python3 /project/sanity_check.py --data_path={data_path} --labels_path={labels_path} --metadata={metadata_path}"
exec_python(cmd)


Expand All @@ -54,8 +55,8 @@ def sanity_check(
out_path: str = typer.Option(..., "--output_path"),
):
# Modify the statistics command as needed
cmd = f"python3 project/statistics.py --data_path={data_path} --labels_path={labels_path} --out_file={out_path} --metadata={metadata_path}"
exec_python(cmd)
cmd = f"python3 /project/statistics.py --data_path={data_path} --labels_path={labels_path} --out_file={out_path} --metadata={metadata_path}"
exec_python(cmd, check_for_failure=False) # Don't throw an error if it fails, to avoid traceback and confusion from users


if __name__ == "__main__":
Expand Down
18 changes: 17 additions & 1 deletion mlcubes/data_preparation/project/prepare.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os
import shutil
import argparse
import pandas as pd
import yaml
Expand Down Expand Up @@ -157,10 +158,25 @@ def init_report(args) -> pd.DataFrame:
def main():
args = setup_argparser()

os.environ["RESULTS_FOLDER"] = os.path.join(args.models, "nnUNet_trained_models")
output_path = args.data_out
models_path = args.models

tmpfolder = os.path.join(output_path, ".tmp")
cbica_tmpfolder = os.path.join(tmpfolder, ".cbicaTemp")
os.environ["TMPDIR"] = tmpfolder
os.environ["CBICA_TEMP_DIR"] = cbica_tmpfolder
os.makedirs(tmpfolder, exist_ok=True)
os.makedirs(cbica_tmpfolder, exist_ok=True)
os.environ["RESULTS_FOLDER"] = os.path.join(models_path, "nnUNet_trained_models")
os.environ["nnUNet_raw_data_base"] = os.path.join(tmpfolder, "nnUNet_raw_data_base")
os.environ["nnUNet_preprocessed"] = os.path.join(tmpfolder, "nnUNet_preprocessed")

report = init_report(args)
pipeline = init_pipeline(args)
pipeline.run(report, args.report)

# cleanup tmp folder
shutil.rmtree(tmpfolder, ignore_errors=True)

if __name__ == "__main__":
main()
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please add a new line at the end for consistency.

12 changes: 6 additions & 6 deletions mlcubes/data_preparation/project/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
typer
pandas
PyYAML
typer==0.9.0
pandas==1.5.3
PyYAML==6.0.1
# Include all your requirements here
SimpleITK
tqdm
scikit-image
SimpleITK==2.3.1
tqdm==4.66.2
scikit-image==0.21.0
FigureGenerator==0.0.4
gandlf==0.0.16
labelfusion==1.0.14
Expand Down
6 changes: 3 additions & 3 deletions mlcubes/data_preparation/project/sanity_check.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,9 @@ def sanity_check(data_path: str, labels_path: str):
"""
# Here you must add all the checks you consider important regarding the
# state of the data
assert has_prepared_folder_structure(
data_path, labels_path
), "The contents of the labels and data don't ressemble a prepared dataset"
if not has_prepared_folder_structure(data_path, labels_path):
print("The contents of the labels and data don't resemble a prepared dataset", flush=True)
exit(1)


if __name__ == "__main__":
Expand Down
2 changes: 2 additions & 0 deletions mlcubes/data_preparation/project/stages/comparison.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,7 @@ def __report_success(
return report

def could_run(self, index: Union[str, int], report: DataFrame) -> bool:
print(f"Checking if {self.name} can run")
# Ensure a single reviewed segmentation file exists
path = self.__get_input_path(index)
gt_path = self.__get_backup_path(index)
Expand All @@ -125,6 +126,7 @@ def could_run(self, index: Union[str, int], report: DataFrame) -> bool:

prev_hash = report.loc[index]["segmentation_hash"]
hash_changed = prev_hash != reviewed_hash
print(f"{path_exists=} and {contains_case=} and {gt_path_exists=} and {hash_changed=}")
is_valid = path_exists and contains_case and gt_path_exists and hash_changed

return is_valid
Expand Down
2 changes: 2 additions & 0 deletions mlcubes/data_preparation/project/stages/confirm.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,13 +127,15 @@ def __process_row(self, row: pd.Series) -> pd.Series:
return row

def could_run(self, report: DataFrame) -> bool:
print(f"Checking if {self.name} can run")
# could run once all cases have been compared to the ground truth
missing_voxels = report["num_changed_voxels"].isnull().values.any()
prev_path_exists = os.path.exists(self.prev_stage_path)
empty_prev_path = True
if prev_path_exists:
empty_prev_path = len(os.listdir(self.prev_stage_path)) == 0

print(f"{prev_path_exists=} and not {empty_prev_path=} and not {missing_voxels=}")
return prev_path_exists and not empty_prev_path and not missing_voxels

def execute(self, report: DataFrame) -> Tuple[DataFrame, bool]:
Expand Down
6 changes: 5 additions & 1 deletion mlcubes/data_preparation/project/stages/extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,11 @@ def could_run(self, index: Union[str, int], report: pd.DataFrame) -> bool:
Returns:
bool: Whether this stage could be executed for the given case
"""
print(f"Checking if {self.name} can run")
prev_paths = self.__get_paths(index, self.prev_path, self.prev_subpath)
return all([os.path.exists(path) for path in prev_paths])
is_valid = all([os.path.exists(path) for path in prev_paths])
print(f"{is_valid=}")
return is_valid

def execute(
self, index: Union[str, int], report: pd.DataFrame
Expand Down Expand Up @@ -97,6 +100,7 @@ def __copy_case(self, index: Union[str, int]):
prev_paths = self.__get_paths(index, self.prev_path, self.prev_subpath)
copy_paths = self.__get_paths(index, self.out_path, self.prev_subpath)
for prev, copy in zip(prev_paths, copy_paths):
shutil.rmtree(copy, ignore_errors=True)
shutil.copytree(prev, copy, dirs_exist_ok=True)

def _process_case(self, index: Union[str, int]):
Expand Down
Loading