-
Notifications
You must be signed in to change notification settings - Fork 0
/
calc_velco_cg.py
executable file
·109 lines (104 loc) · 4.68 KB
/
calc_velco_cg.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
#!/Users/yuzhang/anaconda/bin/python
# Filename: calc_velco_cg.py
# Description: This is a python script to calculate the velocity autocorrelation functions(VACF) and normalized VAF (NVACF)
# VACF = <v(0)*v(t)>
# NVACF = <v(0)*v(t)>/<v(0)*v(0)>
# Date: 03-22-2017
# 03-23-2017 Output both velocity correlation functions and normalized velocity correlation functions
# 03-24-2017 Using the coarse-grained (CG) idea of calculating the VACFs so that memory can be saved for a long-time analysis
# The CG works as follows: assume that m steps are skipped and the frames start with frame 0; then only frames n*m and n*m+1 will
# be used for the calculation of speed, skipping all other frames in the chunk.
# | | | | | | | | | | ...
# v(0) v(1) v(2) ...
import mdtraj as md
import numpy as np
import matplotlib.pyplot as plt
import calc_common as comm
# Output basename: append the user-supplied suffix when one was given.
# (`is not None` rather than `!= None` — identity test is the correct idiom.)
if comm.args.suffix is not None:
    outname = 'vcf_cg_' + comm.args.suffix
else:
    outname = 'vcf_cg'
########## Main ###########
# Load trajectory metadata, force-field parameters and the residue groups to analyze.
traj_name, traj0, topology = comm.load_traj()
para = comm.load_para()
res_targ, res_name = comm.select_groups(traj0)
massind, chargeind = comm.load_atomic(res_targ, topology, para)
bin_size = comm.bin_size
dt = comm.dt
# Number of CG velocity samples: one per `skip` frames.  Floor division keeps
# this an int under both Python 2 and 3 (the old `/` breaks under Python 3).
nframe = (comm.args.nframe - 1) // comm.args.skip
# COM coordinates of the previous sampled frame (velocity = finite difference).
xyz_pre = comm.calc_xyz_com(res_targ, traj0, topology, para)
# vc[res][lag]  : accumulated <v(0).v(t)> samples per residue group and lag index
# base[res][lag]: matching <v(0).v(0)> samples used for normalization
vc = [[[] for _ in range(nframe - nframe // 2 + 1)] for _ in res_targ]
base = [[[] for _ in range(nframe - nframe // 2 + 1)] for _ in res_targ]
vel_ref = []       # CG velocities from the first half of the run (time origins v(0))
chunk_size = 100   # frames per mdtraj.iterload chunk
frame_ind = -1     # index of the current CG velocity sample
for chunk_index, traj in enumerate(md.iterload(traj_name, chunk = chunk_size, top = 'topol.pdb')):
for sub_ind, frame in enumerate(traj):
frame_read = chunk_index*chunk_size+sub_ind
if frame_read%comm.args.skip != 0 and frame_read%comm.args.skip != 1:
if sub_ind%10 == 9:
print 'Reading chunk', chunk_index+1, 'and frame',chunk_index*chunk_size+sub_ind+1
continue
elif frame_read > nframe*comm.args.skip+1:
break
elif frame_read%comm.args.skip == 0:
frame_ind += 1
xyz_pre = comm.calc_xyz_com(res_targ, frame, topology, para)
continue
box = frame.unitcell_lengths[0, :]
xyz_com = comm.calc_xyz_com(res_targ, frame, topology, para)
vel = [[] for _ in res_targ]
for res_ind, xyz_res in enumerate(xyz_com):
vel[res_ind] = xyz_res-xyz_pre[res_ind]
for dim in range(3):
for i in range(len(xyz_res)):
if xyz_res[i, dim]-xyz_pre[res_ind][i, dim] > box[dim]/2:
vel[res_ind][i, dim] -= box[dim]
elif xyz_res[i, dim]-xyz_pre[res_ind][i, dim] < -box[dim]/2:
vel[res_ind][i, dim] += box[dim]
# pdb.set_trace()
if frame_ind < nframe/2:
vel_ref.append(vel)
p_start = 0
p_end = frame_ind
for res_ind, vel_res in enumerate(vel):
base[res_ind][0].append(np.mean(np.sum(vel[res_ind]**2, axis = 1)))
vc[res_ind][0].append(base[res_ind][0][-1])
elif nframe-nframe/2-frame_ind >= 0:
p_start = 0
p_end = nframe/2
else:
p_start = frame_ind-nframe+nframe/2
p_end = nframe/2
for res_ind, vel_res in enumerate(vel):
for loop_ind in range(p_start, p_end):
vc[res_ind][frame_ind-loop_ind].append(np.mean(np.sum(vel[res_ind]*vel_ref[loop_ind][res_ind], axis = 1)))
base[res_ind][frame_ind-loop_ind].append(np.mean(np.sum(vel_ref[loop_ind][res_ind]**2, axis = 1)))
# if chunk_index == 0:
# break
time = np.arange(nframe-nframe/2+1)*dt*comm.args.skip
data = time.copy()
data_raw = time.copy()
pt_start = len(time)/2
nvcf = []
vcf = []
print 'frames read:', chunk_index*chunk_size+sub_ind+1
fig = plt.figure()
for i, item in enumerate(vc):
nvcf.append([])
vcf.append([])
for j in range(len(item)):
nvcf[-1].append(np.mean(item[j])/np.mean(base[i][j]))
vcf[-1].append(np.mean(item[j]))
nvcf[i] = np.array(nvcf[i])
vcf[i] = np.array(vcf[i])*1e6/dt**2 ### units are m^2/s^2
data = np.column_stack((data, nvcf[i]))
data_raw = np.column_stack((data_raw, vcf[i]))
plt.plot(time, nvcf[i], label = str(i))
plt.legend(loc = 'best')
plt.xlabel('time (ps)')
plt.ylabel('Normalized Velociry Correlation Function')
plt.savefig('n'+outname+'.pdf')
np.savetxt('n'+outname+'.txt', data, fmt = ['%12.6f' for i in range(len(data[0]))])
np.savetxt(outname+'.txt', data_raw, fmt = ['%14.6E' for i in range(len(data_raw[0]))])