/
DNN_tools.py
80 lines (69 loc) · 3.56 KB
/
DNN_tools.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import os
import sys
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
# Settings used when saving figures (axes placement inside the figure canvas).
isShowPic = 1            # module-level display flag: 1 -> show figures after saving
Leftp = 0.18             # left edge of the axes box, as a fraction of figure width
Bottomp = 0.18           # bottom edge of the axes box, as a fraction of figure height
Widthp = 0.88-Leftp      # axes width so the right edge lands at 0.88
Heightp = 0.9-Bottomp    # axes height so the top edge lands at 0.9
pos = [Leftp, Bottomp, Widthp, Heightp]  # rectangle handed to ax.set_position in mySaveFig
# Figure-saving helper function
def mySaveFig(pltm, fntmp, fp=0, ax=0, isax=0, iseps=0, isShowPic=0):
    """Save the current figure as PNG (optionally EPS/PDF), then show or close it.

    pltm      -- the matplotlib.pyplot module (or a compatible object)
    fntmp     -- output file name, without extension
    fp        -- PdfPages-like object; when non-zero, a PDF page is saved too
    ax        -- axes to reposition when isax == 1
    isax      -- 1 to enlarge tick labels and move ax to the module-level pos box
    iseps     -- non-zero to additionally write a 600-dpi EPS file
    isShowPic -- 1: show the figure; -1: leave it open; anything else: close it
    """
    if isax == 1:
        # Enlarge tick labels and pin the axes to the shared layout rectangle.
        pltm.rc('xtick', labelsize=18)
        pltm.rc('ytick', labelsize=18)
        ax.set_position(pos, which='both')
    # PNG is always written.
    pltm.savefig('%s.png'%(fntmp))
    if iseps:
        # Optional high-resolution EPS copy.
        pltm.savefig('%s.eps'%(fntmp), format='eps', dpi=600)
    if fp != 0:
        # Optional page in an already-open multi-page PDF.
        fp.savefig("%s.pdf"%(fntmp), bbox_inches='tight')
    if isShowPic == 1:
        pltm.show()
    elif isShowPic == -1:
        # Caller wants the figure kept open; do nothing.
        return
    else:
        pltm.close()
# Logging helpers
def log_string(out_str, log_out):
    """Append out_str plus a newline to the open file object log_out.

    The buffer is flushed immediately so the line reaches the file at once
    instead of waiting for the file to be closed.
    """
    log_out.write('%s\n' % out_str)
    log_out.flush()
def print_and_log_train_one_epoch(i_epoch, run_time, learn_rate, penalty_bd, penalty_powu, pwb, loss_it_tmp, loss_bd_tmp,
                                  loss_tmp, udu, train_mse_tmp, train_res_tmp, log_out=None):
    """Report the diagnostics of one training epoch.

    Every metric is printed to stdout; when log_out is an open, writable
    file object, the same lines are also appended to it via log_string.

    Args:
        i_epoch: index of the finished epoch.
        run_time: wall-clock time of the epoch, in seconds.
        learn_rate: current learning rate.
        penalty_bd: boundary penalty value.
        penalty_powu: penalty for the product of normal and scale.
        pwb: penalty on weights and biases.
        loss_it_tmp: loss_it value for this epoch.
        loss_bd_tmp: boundary-loss value for this epoch.
        loss_tmp: total training loss.
        udu: product of normal and scale for training.
        train_mse_tmp: solution mean-square error on training data.
        train_res_tmp: solution residual error on training data.
        log_out: file object to mirror the report into, or None to skip logging.
    """
    # Print the results of this epoch to the console.
    print('train epoch: %d, time: %.3f' % (i_epoch, run_time))
    print('learning rate: %.10f' % learn_rate)
    print('boundary penalty: %.10f' % penalty_bd)
    print('penalty for product of normal and scale: %.10f' % penalty_powu)
    print('weights and biases with penalty: %.10f' % pwb)
    print('loss_it for training: %.10f' % loss_it_tmp)
    print('loss_bd for training: %.10f' % loss_bd_tmp)
    print('loss for training: %.10f' % loss_tmp)
    print('The product of normal and scale for training: %.10f' % udu)
    print('solution mean square error for training: %.10f' % train_mse_tmp)
    print('solution residual error for training: %.10f\n' % train_res_tmp)
    # Bug fix: the original called log_string unconditionally, so the
    # documented default log_out=None crashed with AttributeError.
    if log_out is None:
        return
    log_string('train epoch: %d,time: %.10f' % (i_epoch, run_time), log_out)
    log_string('learning rate: %.10f' % learn_rate, log_out)
    log_string('boundary penalty: %.10f' % penalty_bd, log_out)
    log_string('penalty for product of normal and scale: %.10f' % penalty_powu, log_out)
    log_string('weights and biases with penalty: %.10f' % pwb, log_out)
    log_string('loss_it for training: %.10f' % loss_it_tmp, log_out)
    log_string('loss_bd for training: %.10f' % loss_bd_tmp, log_out)
    log_string('loss for training: %.10f' % loss_tmp, log_out)
    log_string('The product of normal and scale for training: %.10f' % udu, log_out)
    log_string('solution mean square error for training: %.10f' % train_mse_tmp, log_out)
    log_string('solution residual error for training: %.10f\n' % train_res_tmp, log_out)
def print_and_log_test_one_epoch(mse2test, res2test, log_out=None):
    """Report the test-set diagnostics of one epoch.

    Both metrics are printed to stdout; when log_out is an open, writable
    file object, the same lines are also appended to it via log_string.

    Args:
        mse2test: mean-square error between prediction and reference on test data.
        res2test: residual error between prediction and reference on test data.
        log_out: file object to mirror the report into, or None to skip logging.
    """
    # Print the results to the console.
    print('mean square error of predict and real for testing: %.10f' % mse2test)
    print('residual error of predict and real for testing: %.10f\n' % res2test)
    # Bug fix: the original called log_string unconditionally, so the
    # documented default log_out=None crashed with AttributeError.
    if log_out is None:
        return
    log_string('mean square error of predict and real for testing: %.10f' % mse2test, log_out)
    log_string('residual error of predict and real for testing: %.10f\n\n' % res2test, log_out)