From af10e668c74a69bc7ba03b289491ed480e561a50 Mon Sep 17 00:00:00 2001
From: cchen156
Date: Mon, 16 Apr 2018 23:53:29 -0500
Subject: [PATCH] first commit

---
 checkpoint/Fuji/checkpoint       |    2 +
 checkpoint/Fuji/model.ckpt.index |  Bin 0 -> 4602 bytes
 checkpoint/Sony/checkpoint       |    2 +
 checkpoint/Sony/model.ckpt.index |  Bin 0 -> 4602 bytes
 dataset/Fuji_test_list.txt       |  524 +++++++++
 dataset/Fuji_train_list.txt      | 1655 ++++++++++++++++++++++++++
 dataset/Fuji_val_list.txt        |  218 ++++
 dataset/Sony_test_list.txt       |  598 ++++++++++
 dataset/Sony_train_list.txt      | 1865 ++++++++++++++++++++++++++++++
 dataset/Sony_val_list.txt        |  234 ++++
 download_dataset.py              |   42 +
 download_models.py               |   41 +
 test_Fuji.py                     |  194 ++++
 test_Sony.py                     |  173 +++
 train_Fuji.py                    |  237 ++++
 train_Sony.py                    |  220 ++++
 16 files changed, 6005 insertions(+)
 create mode 100644 checkpoint/Fuji/checkpoint
 create mode 100644 checkpoint/Fuji/model.ckpt.index
 create mode 100644 checkpoint/Sony/checkpoint
 create mode 100644 checkpoint/Sony/model.ckpt.index
 create mode 100644 dataset/Fuji_test_list.txt
 create mode 100644 dataset/Fuji_train_list.txt
 create mode 100644 dataset/Fuji_val_list.txt
 create mode 100644 dataset/Sony_test_list.txt
 create mode 100644 dataset/Sony_train_list.txt
 create mode 100644 dataset/Sony_val_list.txt
 create mode 100644 download_dataset.py
 create mode 100644 download_models.py
 create mode 100644 test_Fuji.py
 create mode 100644 test_Sony.py
 create mode 100644 train_Fuji.py
 create mode 100644 train_Sony.py

diff --git a/checkpoint/Fuji/checkpoint b/checkpoint/Fuji/checkpoint
new file mode 100644
index 0000000..febd7d5
--- /dev/null
+++ b/checkpoint/Fuji/checkpoint
@@ -0,0 +1,2 @@
+model_checkpoint_path: "model.ckpt"
+all_model_checkpoint_paths: "model.ckpt"
diff --git a/checkpoint/Fuji/model.ckpt.index b/checkpoint/Fuji/model.ckpt.index
new file mode 100644
index 0000000000000000000000000000000000000000..f33f3ec965f9e6057ff90eece66629e284155ce3
GIT binary patch
literal 4602
[base85 binary payload omitted: not human-readable and truncated in extraction]
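
The two-line checkpoint files above are TensorFlow's plain-text checkpoint-state
protocol, which tf.train.get_checkpoint_state parses; this is how the training
scripts below find model.ckpt on restart. A minimal sketch of that lookup
(paths assumed relative to the repo root):

    import tensorflow as tf

    # Parses checkpoint/Sony/checkpoint and returns a CheckpointState whose
    # model_checkpoint_path is the recorded 'model.ckpt'.
    ckpt = tf.train.get_checkpoint_state('./checkpoint/Sony/')
    if ckpt:
        print(ckpt.model_checkpoint_path)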
diff --git a/checkpoint/Sony/checkpoint b/checkpoint/Sony/checkpoint
new file mode 100644
index 0000000..febd7d5
--- /dev/null
+++ b/checkpoint/Sony/checkpoint
@@ -0,0 +1,2 @@
+model_checkpoint_path: "model.ckpt"
+all_model_checkpoint_paths: "model.ckpt"
diff --git a/checkpoint/Sony/model.ckpt.index b/checkpoint/Sony/model.ckpt.index
new file mode 100644
index 0000000000000000000000000000000000000000..ce84e33b8d7845a8add375eda11ee0a99fd71ab6
GIT binary patch
literal 4602
[base85 binary payload omitted: not human-readable and truncated in extraction]

[the hunks for the six dataset list files, download_dataset.py, download_models.py, test_Fuji.py, and test_Sony.py, together with the head of train_Fuji.py, are truncated here; the surviving tail of train_Fuji.py's epoch loop resumes below]

+    if epoch > 2000:
+        learning_rate = 1e-5
+
+    for ind in np.random.permutation(len(train_ids)):
+        # get the path from image id
+        train_id = train_ids[ind]
+        in_files = glob.glob(input_dir + '%05d_00*.RAF' % train_id)
+        in_path = in_files[np.random.randint(0, len(in_files))]  # pick one short exposure at random
+        _, in_fn = os.path.split(in_path)
+
+        gt_files = glob.glob(gt_dir + '%05d_00*.RAF' % train_id)
+        gt_path = gt_files[0]
+        _, gt_fn = os.path.split(gt_path)
+        in_exposure = float(in_fn[9:-5])
+        gt_exposure = float(gt_fn[9:-5])
+        ratio = min(gt_exposure / in_exposure, 300)
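+        # Filename layout assumed from the '%05d_00*.RAF' globs and the slicing
+        # above, e.g. '00001_00_0.1s.RAF': the exposure time in seconds sits at
+        # characters [9:-5], so a 10s ground truth over a 0.1s input gives an
+        # amplification ratio of 100, and min(..., 300) caps it at 300.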
+
+        st = time.time()
+        cnt += 1
+
+        if in_images[str(ratio)[0:3]][ind] is None:
+            raw = rawpy.imread(in_path)
+            in_images[str(ratio)[0:3]][ind] = np.expand_dims(pack_raw(raw), axis=0) * ratio
+
+            gt_raw = rawpy.imread(gt_path)
+            im = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
+            gt_images[ind] = np.expand_dims(np.float32(im / 65535.0), axis=0)
+
+        # crop: the packed X-Trans input is at 1/3 the raw resolution, so the
+        # ground-truth patch is taken at 3x scale
+        H = in_images[str(ratio)[0:3]][ind].shape[1]
+        W = in_images[str(ratio)[0:3]][ind].shape[2]
+
+        xx = np.random.randint(0, W - ps)
+        yy = np.random.randint(0, H - ps)
+        input_patch = in_images[str(ratio)[0:3]][ind][:, yy:yy + ps, xx:xx + ps, :]
+        gt_patch = gt_images[ind][:, yy * 3:yy * 3 + ps * 3, xx * 3:xx * 3 + ps * 3, :]
+
+        if np.random.randint(2, size=1)[0] == 1:  # random flip along H
+            input_patch = np.flip(input_patch, axis=1)
+            gt_patch = np.flip(gt_patch, axis=1)
+        if np.random.randint(2, size=1)[0] == 1:  # random flip along W (axis=2; axis=0 would only flip the singleton batch dimension)
+            input_patch = np.flip(input_patch, axis=2)
+            gt_patch = np.flip(gt_patch, axis=2)
+        if np.random.randint(2, size=1)[0] == 1:  # random transpose of H and W
+            input_patch = np.transpose(input_patch, (0, 2, 1, 3))
+            gt_patch = np.transpose(gt_patch, (0, 2, 1, 3))
+
+        input_patch = np.minimum(input_patch, 1.0)  # clip the amplified input to [0, 1]
+
+        _, G_current, output = sess.run([G_opt, G_loss, out_image],
+                                        feed_dict={in_image: input_patch, gt_image: gt_patch, lr: learning_rate})
+        output = np.minimum(np.maximum(output, 0), 1)
+        g_loss[ind] = G_current
+
+        print('%d %d Loss=%.3f Time=%.3f' % (epoch, cnt, np.mean(g_loss[np.where(g_loss)]), time.time() - st))
+
+        if epoch % save_freq == 0:
+            if not os.path.isdir(result_dir + '%04d' % epoch):
+                os.makedirs(result_dir + '%04d' % epoch)
+
+            temp = np.concatenate((gt_patch[0, :, :, :], output[0, :, :, :]), axis=1)
+            scipy.misc.toimage(temp * 255, high=255, low=0, cmin=0, cmax=255).save(
+                result_dir + '%04d/%05d_00_train_%d.jpg' % (epoch, train_id, ratio))
+
+    saver.save(sess, checkpoint_dir + 'model.ckpt')
\ No newline at end of file
diff --git a/train_Sony.py b/train_Sony.py
new file mode 100644
index 0000000..41a9372
--- /dev/null
+++ b/train_Sony.py
@@ -0,0 +1,220 @@
+# uniform content loss + adaptive threshold + per_class_input + recursive G
+# improvement upon cqf37
+from __future__ import division
+import os, time, scipy.io
+import tensorflow as tf
+import tensorflow.contrib.slim as slim
+from tensorflow.contrib.layers.python.layers import initializers
+import numpy as np
+import pdb
+import rawpy
+import glob
+
+
+input_dir = './dataset/Sony/short/'
+gt_dir = './dataset/Sony/long/'
+checkpoint_dir = './checkpoint/Sony/'
+result_dir = './result_Sony/'
+
+# get train and test IDs from the ground-truth file names
+# (training scene IDs start with 0, test scene IDs with 1)
+train_fns = glob.glob(gt_dir + '0*.ARW')
+train_ids = []
+for i in range(len(train_fns)):
+    _, train_fn = os.path.split(train_fns[i])
+    train_ids.append(int(train_fn[0:5]))
+
+test_fns = glob.glob(gt_dir + '1*.ARW')
+test_ids = []
+for i in range(len(test_fns)):
+    _, test_fn = os.path.split(test_fns[i])
+    test_ids.append(int(test_fn[0:5]))
+
+ps = 512  # patch size for training
+save_freq = 500
+
+DEBUG = 0
+if DEBUG == 1:
+    save_freq = 2
+    train_ids = train_ids[0:5]
+    test_ids = test_ids[0:5]
+
+
+def lrelu(x):
+    # leaky ReLU with slope 0.2
+    return tf.maximum(x * 0.2, x)
+
+
+def upsample_and_concat(x1, x2, output_channels, in_channels):
+    # learned 2x upsampling of x1 (transposed conv), concatenated with the
+    # encoder feature map x2 of matching spatial size
+    pool_size = 2
+    deconv_filter = tf.Variable(tf.truncated_normal([pool_size, pool_size, output_channels, in_channels], stddev=0.02))
+    deconv = tf.nn.conv2d_transpose(x1, deconv_filter, tf.shape(x2), strides=[1, pool_size, pool_size, 1])
+
+    deconv_output = tf.concat([deconv, x2], 3)
+    deconv_output.set_shape([None, None, None, output_channels * 2])
+
+    return deconv_output
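+
+# network() below assembles these pieces into a U-Net: a five-level encoder of
+# 3x3 convolutions with 2x2 max pooling between levels, and a decoder in which
+# each upsample_and_concat splices the matching encoder features back in as a
+# skip connection before two further convolutions.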
+
+def network(input):
+    # encoder: five blocks of two 3x3 convs, downsampled between blocks by 2x2 max pooling
+    conv1 = slim.conv2d(input, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_1')
+    conv1 = slim.conv2d(conv1, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_2')
+    pool1 = slim.max_pool2d(conv1, [2, 2], padding='SAME')
+
+    conv2 = slim.conv2d(pool1, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_1')
+    conv2 = slim.conv2d(conv2, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_2')
+    pool2 = slim.max_pool2d(conv2, [2, 2], padding='SAME')
+
+    conv3 = slim.conv2d(pool2, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_1')
+    conv3 = slim.conv2d(conv3, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_2')
+    pool3 = slim.max_pool2d(conv3, [2, 2], padding='SAME')
+
+    conv4 = slim.conv2d(pool3, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_1')
+    conv4 = slim.conv2d(conv4, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_2')
+    pool4 = slim.max_pool2d(conv4, [2, 2], padding='SAME')
+
+    conv5 = slim.conv2d(pool4, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_1')
+    conv5 = slim.conv2d(conv5, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_2')
+
+    # decoder with skip connections
+    up6 = upsample_and_concat(conv5, conv4, 256, 512)
+    conv6 = slim.conv2d(up6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_1')
+    conv6 = slim.conv2d(conv6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_2')
+
+    up7 = upsample_and_concat(conv6, conv3, 128, 256)
+    conv7 = slim.conv2d(up7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_1')
+    conv7 = slim.conv2d(conv7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_2')
+
+    up8 = upsample_and_concat(conv7, conv2, 64, 128)
+    conv8 = slim.conv2d(up8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_1')
+    conv8 = slim.conv2d(conv8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_2')
+
+    up9 = upsample_and_concat(conv8, conv1, 32, 64)
+    conv9 = slim.conv2d(up9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_1')
+    conv9 = slim.conv2d(conv9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_2')
+
+    # 12 output channels -> 2x2 subpixel shuffle -> RGB at twice the packed resolution
+    conv10 = slim.conv2d(conv9, 12, [1, 1], rate=1, activation_fn=None, scope='g_conv10')
+    out = tf.depth_to_space(conv10, 2)
+    return out
+
+
+def pack_raw(raw):
+    # pack the Bayer mosaic into 4 channels at half the spatial resolution
+    im = raw.raw_image_visible.astype(np.float32)
+    im = np.maximum(im - 512, 0) / (16383 - 512)  # subtract the black level (512) and scale by the 14-bit range
+
+    im = np.expand_dims(im, axis=2)
+    img_shape = im.shape
+    H = img_shape[0]
+    W = img_shape[1]
+
+    # one channel per 2x2 Bayer phase (R, G1, B, G2 on an RGGB sensor)
+    out = np.concatenate((im[0:H:2, 0:W:2, :],
+                          im[0:H:2, 1:W:2, :],
+                          im[1:H:2, 1:W:2, :],
+                          im[1:H:2, 0:W:2, :]), axis=2)
+    return out
+
+
+sess = tf.Session()
+in_image = tf.placeholder(tf.float32, [None, None, None, 4])
+gt_image = tf.placeholder(tf.float32, [None, None, None, 3])
+out_image = network(in_image)
+
+G_loss = tf.reduce_mean(tf.abs(out_image - gt_image))  # L1 loss
+
+t_vars = tf.trainable_variables()
+lr = tf.placeholder(tf.float32)
+G_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss, var_list=[var for var in t_vars if var.name.startswith('g_')])
+
+saver = tf.train.Saver()
+sess.run(tf.global_variables_initializer())
+ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
+if ckpt:
+    print('loaded ' + ckpt.model_checkpoint_path)
+    saver.restore(sess, ckpt.model_checkpoint_path)
+
+# Raw data takes a long time to load; keep it in memory after the first read.
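+# input_images is keyed by the amplification-ratio prefix str(ratio)[0:3]
+# ('100', '250', or '300' for this dataset), so every short exposure is
+# decoded, packed, and scaled only once and then reused across epochs.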
+gt_images = [None] * 6000
+input_images = {}
+input_images['300'] = [None] * len(train_ids)
+input_images['250'] = [None] * len(train_ids)
+input_images['100'] = [None] * len(train_ids)
+
+g_loss = np.zeros((5000, 1))
+
+# resume from the last finished epoch if result folders already exist
+allfolders = glob.glob(result_dir + '*0')
+lastepoch = 0
+for folder in allfolders:
+    lastepoch = np.maximum(lastepoch, int(folder[-4:]))
+
+learning_rate = 1e-4
+for epoch in range(lastepoch, 4001):
+    if os.path.isdir(result_dir + '%04d' % epoch):
+        continue
+    cnt = 0
+    if epoch > 2000:
+        learning_rate = 1e-5
+
+    for ind in np.random.permutation(len(train_ids)):
+        # get the path from image id
+        train_id = train_ids[ind]
+        in_files = glob.glob(input_dir + '%05d_00*.ARW' % train_id)
+        in_path = in_files[np.random.randint(0, len(in_files))]  # pick one short exposure at random
+        _, in_fn = os.path.split(in_path)
+
+        gt_files = glob.glob(gt_dir + '%05d_00*.ARW' % train_id)
+        gt_path = gt_files[0]
+        _, gt_fn = os.path.split(gt_path)
+        in_exposure = float(in_fn[9:-5])
+        gt_exposure = float(gt_fn[9:-5])
+        ratio = min(gt_exposure / in_exposure, 300)
+
+        st = time.time()
+        cnt += 1
+
+        if input_images[str(ratio)[0:3]][ind] is None:
+            raw = rawpy.imread(in_path)
+            input_images[str(ratio)[0:3]][ind] = np.expand_dims(pack_raw(raw), axis=0) * ratio
+
+            gt_raw = rawpy.imread(gt_path)
+            im = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
+            gt_images[ind] = np.expand_dims(np.float32(im / 65535.0), axis=0)
+
+        # crop: the packed Bayer input is at half the raw resolution, so the
+        # ground-truth patch is taken at 2x scale
+        H = input_images[str(ratio)[0:3]][ind].shape[1]
+        W = input_images[str(ratio)[0:3]][ind].shape[2]
+
+        xx = np.random.randint(0, W - ps)
+        yy = np.random.randint(0, H - ps)
+        input_patch = input_images[str(ratio)[0:3]][ind][:, yy:yy + ps, xx:xx + ps, :]
+        gt_patch = gt_images[ind][:, yy * 2:yy * 2 + ps * 2, xx * 2:xx * 2 + ps * 2, :]
+
+        if np.random.randint(2, size=1)[0] == 1:  # random flip along H
+            input_patch = np.flip(input_patch, axis=1)
+            gt_patch = np.flip(gt_patch, axis=1)
+        if np.random.randint(2, size=1)[0] == 1:  # random flip along W (axis=2; axis=0 would only flip the singleton batch dimension)
+            input_patch = np.flip(input_patch, axis=2)
+            gt_patch = np.flip(gt_patch, axis=2)
+        if np.random.randint(2, size=1)[0] == 1:  # random transpose of H and W
+            input_patch = np.transpose(input_patch, (0, 2, 1, 3))
+            gt_patch = np.transpose(gt_patch, (0, 2, 1, 3))
+
+        input_patch = np.minimum(input_patch, 1.0)  # clip the amplified input to [0, 1]
+
+        _, G_current, output = sess.run([G_opt, G_loss, out_image],
+                                        feed_dict={in_image: input_patch, gt_image: gt_patch, lr: learning_rate})
+        output = np.minimum(np.maximum(output, 0), 1)
+        g_loss[ind] = G_current
+
+        # mean over images seen so far (zero entries are unvisited)
+        print("%d %d Loss=%.3f Time=%.3f" % (epoch, cnt, np.mean(g_loss[np.where(g_loss)]), time.time() - st))
+
+        if epoch % save_freq == 0:
+            if not os.path.isdir(result_dir + '%04d' % epoch):
+                os.makedirs(result_dir + '%04d' % epoch)
+
+            temp = np.concatenate((gt_patch[0, :, :, :], output[0, :, :, :]), axis=1)
+            scipy.misc.toimage(temp * 255, high=255, low=0, cmin=0, cmax=255).save(
+                result_dir + '%04d/%05d_00_train_%d.jpg' % (epoch, train_id, ratio))
+
+    saver.save(sess, checkpoint_dir + 'model.ckpt')
\ No newline at end of file
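
For reference, the geometry linking pack_raw, ps, and the 2x ground-truth crop
can be checked with plain numpy; a minimal sketch on a synthetic even-sized
mosaic (the 4x6 array is an illustration, not repo data):

    import numpy as np

    H, W = 4, 6  # stand-in for raw.raw_image_visible's shape
    im = np.arange(H * W, dtype=np.float32).reshape(H, W, 1)

    # same packing as pack_raw: one channel per 2x2 Bayer phase, half resolution
    packed = np.concatenate((im[0:H:2, 0:W:2, :],
                             im[0:H:2, 1:W:2, :],
                             im[1:H:2, 1:W:2, :],
                             im[1:H:2, 0:W:2, :]), axis=2)
    print(packed.shape)  # (2, 3, 4): H/2 x W/2 x 4

    # tf.depth_to_space(conv10, 2) in the network undoes this 2x reduction,
    # which is why gt_patch is cropped at yy*2, xx*2 with side ps*2.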