From 3fb9e33b8aa5b22fcf74b0566b395e1a0ec1c4c6 Mon Sep 17 00:00:00 2001
From: JX278 <113102069+JX278@users.noreply.github.com>
Date: Sun, 8 Jan 2023 11:12:54 +0800
Subject: [PATCH 1/7] Fix the Miniconda installation step

---
 .github/workflows/CPU_inferencce_validation.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/CPU_inferencce_validation.yml b/.github/workflows/CPU_inferencce_validation.yml
index d98da643..3a6ea00c 100644
--- a/.github/workflows/CPU_inferencce_validation.yml
+++ b/.github/workflows/CPU_inferencce_validation.yml
@@ -30,7 +30,7 @@ jobs:
         cd deepflame-dev
         wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
         wget https://mirrors.edge.kernel.org/ubuntu/pool/main/libf/libffi/libffi7_3.3-4_amd64.deb
-        sh Miniconda3-latest-Linux-x86_64.sh -b
+        bash Miniconda3-latest-Linux-x86_64.sh -b
         . ~/miniconda3/etc/profile.d/conda.sh
         conda create -n libcantera python=3.8
         conda activate libcantera

From 1ef063f4ac011022bf780ff3b77ad9d8ef4a12f2 Mon Sep 17 00:00:00 2001
From: JX278 <113102069+JX278@users.noreply.github.com>
Date: Sun, 8 Jan 2023 12:57:51 +0800
Subject: [PATCH 2/7] Debug the 0D case

---
 .github/workflows/CPU_inferencce_validation.yml | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/CPU_inferencce_validation.yml b/.github/workflows/CPU_inferencce_validation.yml
index 3a6ea00c..926722c2 100644
--- a/.github/workflows/CPU_inferencce_validation.yml
+++ b/.github/workflows/CPU_inferencce_validation.yml
@@ -47,8 +47,10 @@ jobs:
         OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1
         OMPI_MCA_btl_vader_single_copy_mechanism: none
       run:
-        /bin/bash -c "git clone https://github.com/deepcombustion/deepcombustion.git && cp -r deepcombustion/DeePCK/Model/HE04_Hydrogen_ESH2_GMS_sub_20221101/ mechanisms/ && source ~/miniconda3/etc/profile.d/conda.sh && conda activate libcantera && source /opt/openfoam7/etc/bashrc && . configure.sh --use_pytorch && source ./bashrc && . install.sh && cd test && ./Allrun && conda deactivate "
-
+        /bin/bash -c "git clone https://github.com/deepcombustion/deepcombustion.git && cp -r deepcombustion/DeePCK/Model/HE04_Hydrogen_ESH2_GMS_sub_20221101/ mechanisms/ && source ~/miniconda3/etc/profile.d/conda.sh && conda activate libcantera && source /opt/openfoam7/etc/bashrc
+        && . configure.sh --use_pytorch && source ./bashrc && . install.sh && cd examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator && blockMesh && decomposePar && mpirun -np 4 /__w/deepflame-dev/deepflame-dev/bin/df0DFoam -parallel && ./Allclean
+        && cd ../../../../../test && ./Allrun && conda deactivate "
+
       - name: test
         run: |
           cd test
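Note on the workflow change above: the long `/bin/bash -c` command is now split across three YAML lines, which still works because the `run:` value is a plain multi-line scalar and YAML folds its continuation lines into a single string joined by spaces, so bash receives one command line. A minimal sketch of the same folding behaviour (the step below is hypothetical, for illustration only, not part of the patch):

    steps:
      - name: folding-demo
        run:
          /bin/bash -c "echo one
          && echo two"
    # YAML folds the continuation line with a single space, so bash is invoked as:
    #   /bin/bash -c "echo one && echo two"
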
From 053196e51523dd96a89d4c77006a6cf2afea8ec7 Mon Sep 17 00:00:00 2001
From: JX278 <113102069+JX278@users.noreply.github.com>
Date: Sun, 8 Jan 2023 14:01:49 +0800
Subject: [PATCH 3/7] Add debug prints to inference.py

---
 .../H2/pytorchIntegrator/inference.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py b/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py
index 2ceabdf4..071a292d 100644
--- a/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py
+++ b/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py
@@ -10,7 +10,7 @@
 import torch.profiler
 
 import os
-
+print('enter the inference file')
 torch.set_printoptions(precision=10)
 
 
@@ -139,7 +139,7 @@ def forward(self, x):
 model0.to(device=device)
 model1.to(device=device)
 model2.to(device=device)
-
+print("load models onto the device")
 if len(device_ids) > 1:
     model0 = torch.nn.DataParallel(model0, device_ids=device_ids)
     model1 = torch.nn.DataParallel(model1, device_ids=device_ids)
@@ -165,7 +165,7 @@ def inference(vec0, vec1, vec2):
     input0_ = torch.from_numpy(vec0).double().to(device=device) #cast ndarray to torch tensor
     input1_ = torch.from_numpy(vec1).double().to(device=device) #cast ndarray to torch tensor
     input2_ = torch.from_numpy(vec2).double().to(device=device) #cast ndarray to torch tensor
-
+    print('start pre-processing')
     # pre_processing
     rho0 = input0_[:, 0].unsqueeze(1)
     input0_Y = input0_[:, 3:].clone()
@@ -191,12 +191,12 @@ def inference(vec0, vec1, vec2):
     input2_normalized = (input2_bct - Xmu2) / Xstd2
     # input2_normalized[:, -1] = 0 #set Y_AR to 0
     input2_normalized = input2_normalized.float()
-
+    print('inferencing')
     #inference
     output0_normalized = model0(input0_normalized)
     output1_normalized = model1(input1_normalized)
     output2_normalized = model2(input2_normalized)
-
+    print('post-processing starts')
     # post_processing
     output0_bct = (output0_normalized * Ystd0 + Ymu0) * delta_t + input0_bct
     output0_Y = (lamda * output0_bct[:, 2:] + 1)**(1 / lamda)

From 3188e29241aad6988523a83f530d0881ea9174e3 Mon Sep 17 00:00:00 2001
From: JX278 <113102069+JX278@users.noreply.github.com>
Date: Sun, 8 Jan 2023 14:32:42 +0800
Subject: [PATCH 4/7] Update inference.py

---
 .../zeroD_cubicReactor/H2/pytorchIntegrator/inference.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py b/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py
index 071a292d..dfa3f360 100644
--- a/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py
+++ b/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py
@@ -10,7 +10,6 @@
 import torch.profiler
 
 import os
-print('enter the inference file')
 torch.set_printoptions(precision=10)
 
 
@@ -165,7 +164,7 @@ def inference(vec0, vec1, vec2):
     input0_ = torch.from_numpy(vec0).double().to(device=device) #cast ndarray to torch tensor
     input1_ = torch.from_numpy(vec1).double().to(device=device) #cast ndarray to torch tensor
     input2_ = torch.from_numpy(vec2).double().to(device=device) #cast ndarray to torch tensor
-    print('start pre-processing')
+
     # pre_processing
     rho0 = input0_[:, 0].unsqueeze(1)
     input0_Y = input0_[:, 3:].clone()
@@ -191,12 +190,12 @@ def inference(vec0, vec1, vec2):
     input2_normalized = (input2_bct - Xmu2) / Xstd2
     # input2_normalized[:, -1] = 0 #set Y_AR to 0
     input2_normalized = input2_normalized.float()
-    print('inferencing')
+
     #inference
     output0_normalized = model0(input0_normalized)
     output1_normalized = model1(input1_normalized)
     output2_normalized = model2(input2_normalized)
-    print('post-processing starts')
+
     # post_processing
     output0_bct = (output0_normalized * Ystd0 + Ymu0) * delta_t + input0_bct
     output0_Y = (lamda * output0_bct[:, 2:] + 1)**(1 / lamda)
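A note on the pre- and post-processing that these debug prints bracket: the inputs are Box-Cox transformed (BCT) and z-score normalized before the network call, and the outputs are de-normalized and inverse-transformed afterwards; the `(lamda * output0_bct[:, 2:] + 1)**(1 / lamda)` line above is the inverse BCT. A minimal self-contained sketch of that round trip (the forward transform is inferred from the inverse shown in the diff; `lamda`, `mu`, and `std` are illustrative stand-ins for the values shipped with the models, not taken from them):

    import numpy as np

    lamda = 0.1  # illustrative Box-Cox parameter; the real value is loaded with the model

    def bct(y, lam):
        # forward Box-Cox transform; inverse of (lam * y_bct + 1) ** (1 / lam)
        return (np.power(y, lam) - 1.0) / lam

    def inverse_bct(y_bct, lam):
        # mirrors the post-processing line in inference.py
        return np.power(lam * y_bct + 1.0, 1.0 / lam)

    y = np.array([0.2, 0.5, 0.7])          # e.g. species mass fractions
    y_bct = bct(y, lamda)
    mu, std = y_bct.mean(), y_bct.std()    # stand-ins for the Xmu*/Xstd* statistics
    y_norm = (y_bct - mu) / std            # what the network would consume
    y_back = inverse_bct(y_norm * std + mu, lamda)
    assert np.allclose(y, y_back)          # the round trip recovers the input
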
From 0cd09af5987c77bf620df784935b8fa97fb26a81 Mon Sep 17 00:00:00 2001
From: JX278 <113102069+JX278@users.noreply.github.com>
Date: Sun, 8 Jan 2023 14:33:33 +0800
Subject: [PATCH 5/7] Update inference.py

---
 .../zeroD_cubicReactor/H2/pytorchIntegrator/inference.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py b/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py
index dfa3f360..17d70510 100644
--- a/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py
+++ b/examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator/inference.py
@@ -138,7 +138,7 @@ def forward(self, x):
 model0.to(device=device)
 model1.to(device=device)
 model2.to(device=device)
-print("load models onto the device")
+
 if len(device_ids) > 1:
     model0 = torch.nn.DataParallel(model0, device_ids=device_ids)
     model1 = torch.nn.DataParallel(model1, device_ids=device_ids)

From 1fc3a983b35cb7f8954d093c9edc19b3e4dff3a0 Mon Sep 17 00:00:00 2001
From: JX278 <113102069+JX278@users.noreply.github.com>
Date: Sun, 8 Jan 2023 14:36:41 +0800
Subject: [PATCH 6/7] Update CPU_inferencce_validation.yml

---
 .github/workflows/CPU_inferencce_validation.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/CPU_inferencce_validation.yml b/.github/workflows/CPU_inferencce_validation.yml
index 926722c2..11fa1ae2 100644
--- a/.github/workflows/CPU_inferencce_validation.yml
+++ b/.github/workflows/CPU_inferencce_validation.yml
@@ -48,8 +48,8 @@ jobs:
         OMPI_MCA_btl_vader_single_copy_mechanism: none
       run:
         /bin/bash -c "git clone https://github.com/deepcombustion/deepcombustion.git && cp -r deepcombustion/DeePCK/Model/HE04_Hydrogen_ESH2_GMS_sub_20221101/ mechanisms/ && source ~/miniconda3/etc/profile.d/conda.sh && conda activate libcantera && source /opt/openfoam7/etc/bashrc
-        && . configure.sh --use_pytorch && source ./bashrc && . install.sh && cd examples/df0DFoam/zeroD_cubicReactor/H2/pytorchIntegrator && blockMesh && decomposePar && mpirun -np 4 /__w/deepflame-dev/deepflame-dev/bin/df0DFoam -parallel && ./Allclean
-        && cd ../../../../../test && ./Allrun && conda deactivate "
+        && . configure.sh --use_pytorch && source ./bashrc && . install.sh
+        && cd test && ./Allrun && conda deactivate "
 
       - name: test
         run: |
From 2cf84139bfacdfd7e758c37c519be0c75750fb6a Mon Sep 17 00:00:00 2001
From: JX278 <113102069+JX278@users.noreply.github.com>
Date: Sun, 8 Jan 2023 15:29:53 +0800
Subject: [PATCH 7/7] Update YEqn.H

---
 applications/solvers/dfLowMachFoam/YEqn.H | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/applications/solvers/dfLowMachFoam/YEqn.H b/applications/solvers/dfLowMachFoam/YEqn.H
index 173d0e4d..8e0befd6 100644
--- a/applications/solvers/dfLowMachFoam/YEqn.H
+++ b/applications/solvers/dfLowMachFoam/YEqn.H
@@ -21,7 +21,10 @@ forAll(Y, i)
 }
 
 const surfaceScalarField phiUc = linearInterpolate(sumYDiffError) & mesh.Sf();
-MPI_Barrier(PstreamGlobals::MPI_COMM_FOAM);
+//MPI_Barrier(PstreamGlobals::MPI_COMM_FOAM);
+label flag_mpi_init;
+MPI_Initialized(&flag_mpi_init);
+if(flag_mpi_init) MPI_Barrier(PstreamGlobals::MPI_COMM_FOAM);
 
 end = std::clock();
 time_monitor_corrDiff += double(end - start) / double(CLOCKS_PER_SEC);
@@ -30,8 +33,8 @@ time_monitor_corrDiff += double(end - start) / double(CLOCKS_PER_SEC);
 {
     std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
     combustion->correct();
-    label flag_mpi_init;
-    MPI_Initialized(&flag_mpi_init);
+    //label flag_mpi_init;
+    //MPI_Initialized(&flag_mpi_init);
     if(flag_mpi_init) MPI_Barrier(PstreamGlobals::MPI_COMM_FOAM);
     std::chrono::steady_clock::time_point stop = std::chrono::steady_clock::now();
     std::chrono::duration<double> processingTime = std::chrono::duration_cast<std::chrono::duration<double>>(stop - start);
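A note on the change above: the patch hoists the `flag_mpi_init` declaration so both barriers share one `MPI_Initialized` check, and `MPI_Initialized` is among the few MPI calls that are valid even before `MPI_Init`, which makes the guard safe in serial runs where issuing a barrier would be undefined. A minimal standalone sketch of the same pattern in plain MPI (outside OpenFOAM, so `MPI_COMM_WORLD` stands in for `PstreamGlobals::MPI_COMM_FOAM`):

    #include <mpi.h>
    #include <cstdio>

    // Synchronize only when MPI is actually up; safe to call from code paths
    // that may also execute in a serial (non-MPI) run of the solver.
    void safeBarrier(MPI_Comm comm)
    {
        int initialized = 0;
        MPI_Initialized(&initialized);  // legal to call even before MPI_Init
        if (initialized)
            MPI_Barrier(comm);          // the collective call is now well-defined
    }

    int main(int argc, char** argv)
    {
        safeBarrier(MPI_COMM_WORLD);    // no-op here: MPI_Init has not run yet
        MPI_Init(&argc, &argv);
        safeBarrier(MPI_COMM_WORLD);    // real barrier across all ranks
        std::printf("past the barrier\n");
        MPI_Finalize();
        return 0;
    }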