Skip to content

Commit

Permalink
Merge pull request #149 from kyleabeauchamp/py3
Browse files Browse the repository at this point in the history
Py3
  • Loading branch information
kyleabeauchamp committed Jan 8, 2015
2 parents e1dd00c + eb2df65 commit c258e81
Show file tree
Hide file tree
Showing 15 changed files with 166 additions and 157 deletions.
1 change: 1 addition & 0 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ script:
env:
matrix:
- CONDA_PY=2.7
- CONDA_PY=3.4

global:
# encrypted BINSTAR_TOKEN for push of dev package to binstar
Expand Down
7 changes: 5 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,13 @@ Prerequisites

The pymbar module requires the following:

* Python 2.4 or later: http://www.python.org/
* The `Python.h` header file (either installed via the Python installer, but a separate `python-dev` package on distributions like Ubuntu)
* Python 2.7 or later: http://www.python.org/
* the NumPy package: http://numpy.scipy.org/
* the SciPy package: http://www.scipy.org/
* NumExpr
* six
* cython
* nose
* Some optional graphing functionality in the tests requires the matplotlib library: http://matplotlib.sourceforge.net/

Quickstart
Expand Down
4 changes: 3 additions & 1 deletion devtools/conda-recipe/meta.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,15 @@ requirements:
- numpy
- scipy
- setuptools
- numexpr
- numexpr
- six
run:
- python
- cython
- numpy
- scipy
- numexpr
- six

test:
requires:
Expand Down
26 changes: 13 additions & 13 deletions pymbar/bar.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ def BARzero(w_F, w_R, DeltaF):
log_f_F = - max_arg_F - np.log(np.exp(-max_arg_F) + np.exp(exp_arg_F - max_arg_F))
except:
# give up; if there's overflow, return zero
print "The input data results in overflow in BAR"
print("The input data results in overflow in BAR")
return np.nan
log_numer = _logsum(log_f_F) - np.log(T_F)

Expand All @@ -121,7 +121,7 @@ def BARzero(w_F, w_R, DeltaF):
try:
log_f_R = - max_arg_R - np.log(np.exp(-max_arg_R) + np.exp(exp_arg_R - max_arg_R)) - w_R
except:
print "The input data results in overflow in BAR"
print("The input data results in overflow in BAR")
return np.nan
log_denom = _logsum(log_f_R) - np.log(T_R)

Expand Down Expand Up @@ -183,7 +183,7 @@ def BAR(w_F, w_R, DeltaF=0.0, compute_uncertainty=True, maximum_iterations=500,
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> [DeltaF, dDeltaF] = BAR(w_F, w_R)
>>> print 'Free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF)
>>> print('Free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF))
Free energy difference is 1.088 +- 0.050 kT
Test various other schemes.
Expand Down Expand Up @@ -214,7 +214,7 @@ def BAR(w_F, w_R, DeltaF=0.0, compute_uncertainty=True, maximum_iterations=500,

if (np.isnan(FUpperB) or np.isnan(FLowerB)):
# this data set is returning NAN -- will likely not work. Return 0, print a warning:
print "Warning: BAR is likely to be inaccurate because of poor overlap. Improve the sampling, or decrease the spacing betweeen states. For now, guessing that the free energy difference is 0 with no uncertainty."
print("Warning: BAR is likely to be inaccurate because of poor overlap. Improve the sampling, or decrease the spacing betweeen states. For now, guessing that the free energy difference is 0 with no uncertainty.")
if compute_uncertainty:
return [0.0, 0.0]
else:
Expand All @@ -224,7 +224,7 @@ def BAR(w_F, w_R, DeltaF=0.0, compute_uncertainty=True, maximum_iterations=500,
# if they have the same sign, they do not bracket. Widen the bracket until they have opposite signs.
# There may be a better way to do this, and the above bracket should rarely fail.
if verbose:
print 'Initial brackets did not actually bracket, widening them'
print('Initial brackets did not actually bracket, widening them')
FAve = (UpperB + LowerB) / 2
UpperB = UpperB - max(abs(UpperB - FAve), 0.1)
LowerB = LowerB + max(abs(LowerB - FAve), 0.1)
Expand All @@ -251,7 +251,7 @@ def BAR(w_F, w_R, DeltaF=0.0, compute_uncertainty=True, maximum_iterations=500,
if FNew == 0:
# Convergence is achieved.
if verbose:
print "Convergence achieved."
print("Convergence achieved.")
relative_change = 10 ^ (-15)
break

Expand All @@ -269,7 +269,7 @@ def BAR(w_F, w_R, DeltaF=0.0, compute_uncertainty=True, maximum_iterations=500,
if (DeltaF == 0.0):
# The free energy difference appears to be zero -- return.
if verbose:
print "The free energy difference appears to be zero."
print("The free energy difference appears to be zero.")
if compute_uncertainty:
return [0.0, 0.0]
else:
Expand All @@ -278,12 +278,12 @@ def BAR(w_F, w_R, DeltaF=0.0, compute_uncertainty=True, maximum_iterations=500,
if iterated_solution:
relative_change = abs((DeltaF - DeltaF_old) / DeltaF)
if verbose:
print "relative_change = %12.3f" % relative_change
print("relative_change = %12.3f" % relative_change)

if ((iteration > 0) and (relative_change < relative_tolerance)):
# Convergence is achieved.
if verbose:
print "Convergence achieved."
print("Convergence achieved.")
break

if method == 'false-position' or method == 'bisection':
Expand All @@ -300,13 +300,13 @@ def BAR(w_F, w_R, DeltaF=0.0, compute_uncertainty=True, maximum_iterations=500,
raise BoundsError(message)

if verbose:
print "iteration %5d : DeltaF = %16.3f" % (iteration, DeltaF)
print("iteration %5d : DeltaF = %16.3f" % (iteration, DeltaF))

# Report convergence, or warn user if not achieved.
if iterated_solution:
if iteration < maximum_iterations:
if verbose:
print 'Converged to tolerance of %e in %d iterations (%d function evaluations)' % (relative_change, iteration, nfunc)
print('Converged to tolerance of %e in %d iterations (%d function evaluations)' % (relative_change, iteration, nfunc))
else:
message = 'WARNING: Did not converge to within specified tolerance. max_delta = %f, TOLERANCE = %f, MAX_ITS = %d' % (relative_change, relative_tolerance, maximum_iterations)
raise ConvergenceError(message)
Expand Down Expand Up @@ -340,11 +340,11 @@ def BAR(w_F, w_R, DeltaF=0.0, compute_uncertainty=True, maximum_iterations=500,

dDeltaF = np.sqrt(variance)
if verbose:
print "DeltaF = %8.3f +- %8.3f" % (DeltaF, dDeltaF)
print("DeltaF = %8.3f +- %8.3f" % (DeltaF, dDeltaF))
return (DeltaF, dDeltaF)
else:
if verbose:
print "DeltaF = %8.3f" % (DeltaF)
print("DeltaF = %8.3f" % (DeltaF))
return DeltaF

#=============================================================================================
Expand Down
78 changes: 39 additions & 39 deletions pymbar/confidenceintervals.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,21 +178,21 @@ def generateConfidenceIntervals(replicates, K):
# If the error is normal, we should have
# P(error < alpha sigma) = erf(alpha / sqrt(2))

print "The uncertainty estimates are tested in this section."
print "If the error is normally distributed, the actual error will be less than a"
print "multiplier 'alpha' times the computed uncertainty 'sigma' a fraction of"
print "time given by:"
print "P(error < alpha sigma) = erf(alpha / sqrt(2))"
print "For example, the true error should be less than 1.0 * sigma"
print "(one standard deviation) a total of 68% of the time, and"
print "less than 2.0 * sigma (two standard deviations) 95% of the time."
print "The observed fraction of the time that error < alpha sigma, and its"
print "uncertainty, is given as 'obs' (with uncertainty 'obs err') below."
print "This should be compared to the column labeled 'normal'."
print "A weak lower bound that holds regardless of how the error is distributed is given"
print "by Chebyshev's inequality, and is listed as 'cheby' below."
print "Uncertainty estimates are tested for both free energy differences and expectations."
print ""
print("The uncertainty estimates are tested in this section.")
print("If the error is normally distributed, the actual error will be less than a")
print("multiplier 'alpha' times the computed uncertainty 'sigma' a fraction of")
print("time given by:")
print("P(error < alpha sigma) = erf(alpha / sqrt(2))")
print("For example, the true error should be less than 1.0 * sigma")
print("(one standard deviation) a total of 68% of the time, and")
print("less than 2.0 * sigma (two standard deviations) 95% of the time.")
print("The observed fraction of the time that error < alpha sigma, and its")
print("uncertainty, is given as 'obs' (with uncertainty 'obs err') below.")
print("This should be compared to the column labeled 'normal'.")
print("A weak lower bound that holds regardless of how the error is distributed is given")
print("by Chebyshev's inequality, and is listed as 'cheby' below.")
print("Uncertainty estimates are tested for both free energy differences and expectations.")
print("")

# error bounds

Expand All @@ -219,11 +219,11 @@ def generateConfidenceIntervals(replicates, K):
# We only count differences where the analytical difference is larger than a cutoff, so that the results will not be limited by machine precision.
if (dim == 0):
if np.isnan(replicate['error']) or np.isnan(replicate['destimated']):
print "replicate %d" % replicate_index
print "error"
print replicate['error']
print "destimated"
print replicate['destimated']
print("replicate %d" % replicate_index)
print("error")
print(replicate['error'])
print("destimated")
print(replicate['destimated'])
raise "isnan"
else:
if abs(replicate['error']) <= alpha * replicate['destimated']:
Expand All @@ -234,11 +234,11 @@ def generateConfidenceIntervals(replicates, K):
elif (dim == 1):
for i in range(0, K):
if np.isnan(replicate['error'][i]) or np.isnan(replicate['destimated'][i]):
print "replicate %d" % replicate_index
print "error"
print replicate['error']
print "destimated"
print replicate['destimated']
print("replicate %d" % replicate_index)
print("error")
print(replicate['error'])
print("destimated")
print(replicate['destimated'])
raise "isnan"
else:
if abs(replicate['error'][i]) <= alpha * replicate['destimated'][i]:
Expand All @@ -250,11 +250,11 @@ def generateConfidenceIntervals(replicates, K):
for i in range(0, K):
for j in range(0, i):
if np.isnan(replicate['error'][i, j]) or np.isnan(replicate['destimated'][i, j]):
print "replicate %d" % replicate_index
print "ij_error"
print replicate['error']
print "ij_estimated"
print replicate['destimated']
print("replicate %d" % replicate_index)
print("ij_error")
print(replicate['error'])
print("ij_estimated")
print(replicate['destimated'])
raise "isnan"
else:
if abs(replicate['error'][i, j]) <= alpha * replicate['destimated'][i, j]:
Expand All @@ -268,12 +268,12 @@ def generateConfidenceIntervals(replicates, K):
dPobs[alpha_index] = np.sqrt(a * b / ((a + b) ** 2 * (a + b + 1)))

# Write error as a function of sigma.
print "Error vs. alpha"
print "%5s %10s %10s %16s %17s" % ('alpha', 'cheby', 'obs', 'obs err', 'normal')
print("Error vs. alpha")
print("%5s %10s %10s %16s %17s" % ('alpha', 'cheby', 'obs', 'obs err', 'normal'))
Pnorm = scipy.special.erf(alpha_values / np.sqrt(2.))
for alpha_index in range(0, nalpha):
alpha = alpha_values[alpha_index]
print "%5.1f %10.6f %10.6f (%10.6f,%10.6f) %10.6f" % (alpha, 1. - 1. / alpha ** 2, Pobs[alpha_index], Plow[alpha_index], Phigh[alpha_index], Pnorm[alpha_index])
print("%5.1f %10.6f %10.6f (%10.6f,%10.6f) %10.6f" % (alpha, 1. - 1. / alpha ** 2, Pobs[alpha_index], Plow[alpha_index], Phigh[alpha_index], Pnorm[alpha_index]))

# compute bias, average, etc - do it by replicate, not by bias
if dim == 0:
Expand Down Expand Up @@ -318,9 +318,9 @@ def generateConfidenceIntervals(replicates, K):
ave_std = (np.average(d2, axis=0)) ** (1.0 / 2.0)

# for now, just print out the data at the end for each
print ""
print " i average bias rms_error stddev ave_analyt_std"
print "---------------------------------------------------------------------"
print("")
print(" i average bias rms_error stddev ave_analyt_std")
print("---------------------------------------------------------------------")
if dim == 0:
pave = aveval
pbias = bias
Expand All @@ -334,16 +334,16 @@ def generateConfidenceIntervals(replicates, K):
prms = rms_error[i]
pstdev = standarddev[i]
pavestd = ave_std[i]
print "%7d %10.4f %10.4f %10.4f %10.4f %10.4f" % (i, pave, pbias, prms, pstdev, pavestd)
print("%7d %10.4f %10.4f %10.4f %10.4f %10.4f" % (i, pave, pbias, prms, pstdev, pavestd))
elif dim == 2:
for i in range(0, K):
pave = aveval[0, i]
pbias = bias[0, i]
prms = rms_error[0, i]
pstdev = standarddev[0, i]
pavestd = ave_std[0, i]
print "%7d %10.4f %10.4f %10.4f %10.4f %10.4f" % (i, pave, pbias, prms, pstdev, pavestd)
print("%7d %10.4f %10.4f %10.4f %10.4f %10.4f" % (i, pave, pbias, prms, pstdev, pavestd))

print "Totals: %10.4f %10.4f %10.4f %10.4f %10.4f" % (pave, pbias, prms, pstdev, pavestd)
print("Totals: %10.4f %10.4f %10.4f %10.4f %10.4f" % (pave, pbias, prms, pstdev, pavestd))

return alpha_values, Pobs, Plow, Phigh, dPobs, Pnorm
8 changes: 4 additions & 4 deletions pymbar/exp.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,10 +84,10 @@ def EXP(w_F, compute_uncertainty=True, is_timeseries=False):
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> [DeltaF, dDeltaF] = EXP(w_F)
>>> print 'Forward free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF)
>>> print('Forward free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF))
Forward free energy difference is 1.088 +- 0.076 kT
>>> [DeltaF, dDeltaF] = EXP(w_R)
>>> print 'Reverse free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF)
>>> print('Reverse free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF))
Reverse free energy difference is -1.073 +- 0.082 kT
"""
Expand Down Expand Up @@ -159,10 +159,10 @@ def EXPGauss(w_F, compute_uncertainty=True, is_timeseries=False):
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> [DeltaF, dDeltaF] = EXPGauss(w_F)
>>> print 'Forward Gaussian approximated free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF)
>>> print('Forward Gaussian approximated free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF))
Forward Gaussian approximated free energy difference is 1.049 +- 0.089 kT
>>> [DeltaF, dDeltaF] = EXPGauss(w_R)
>>> print 'Reverse Gaussian approximated free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF)
>>> print('Reverse Gaussian approximated free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF))
Reverse Gaussian approximated free energy difference is -1.073 +- 0.080 kT
"""
Expand Down

0 comments on commit c258e81

Please sign in to comment.