Drop support for Python 2 (#704)
alexhenrie committed Mar 12, 2020
1 parent 511c7d8 commit e1b5a8b
Showing 8 changed files with 21 additions and 31 deletions.
14 changes: 7 additions & 7 deletions benchmarks/benchmark_distributions.py
@@ -17,7 +17,7 @@
def print_benchmark( distribution, duration ):
"""Formatted print."""

print "{:25}: {:.4}s".format( distribution.__class__.__name__, duration )
print( "{:25}: {:.4}s".format( distribution.__class__.__name__, duration ) )

def bench_log_probability( distribution, n=10000000, symbol=5 ):
"""Bench a log probability distribution."""
@@ -115,11 +115,11 @@ def benchmark_distribution_train():

print_benchmark( distribution, bench_from_sample( distribution, sample ) )

print "DISTRIBUTION BENCHMARKS"
print "-----------------------"
print
print "LOG PROBABILITY (N=10,000,000 iterations, N=100,000 FOR MVG)"
print( "DISTRIBUTION BENCHMARKS" )
print( "-----------------------" )
print()
print( "LOG PROBABILITY (N=10,000,000 iterations, N=100,000 FOR MVG)" )
benchmark_distribution_log_probabilities()
-print
-print "TRAINING (N=1,000 ITERATIONS, BATCHES=10,000 ITEMS)"
+print()
+print( "TRAINING (N=1,000 ITERATIONS, BATCHES=10,000 ITEMS)" )
benchmark_distribution_train()
8 changes: 4 additions & 4 deletions benchmarks/benchmark_hmm.py
@@ -54,26 +54,26 @@ def global_alignment( match_distributions, insert_distribution ):

def benchmark_forward( model, sample ):
tic = time.time()
-for i in xrange(25000):
+for i in range(25000):
logp = model.forward( sample )[-1, model.end_index]
print("{:16}: time: {:5.5}, logp: {:5.5}".format( "FORWARD", time.time() - tic, logp ))

def benchmark_backward( model, sample ):
tic = time.time()
-for i in xrange(25000):
+for i in range(25000):
logp = model.backward( sample )[0, model.start_index]
print("{:16}: time: {:5.5}, logp: {:5.5}".format( "BACKWARD", time.time() - tic, logp ))

def benchmark_forward_backward( model, sample ):
tic = time.time()
-for i in xrange(25000):
+for i in range(25000):
model.forward_backward( sample )
print("{:16}: time: {:5.5}".format( "FORWARD-BACKWARD", time.time() - tic ))

def benchmark_viterbi( model, sample ):

tic = time.time()
-for i in xrange(25000):
+for i in range(25000):
logp, path = model.viterbi( sample )
print("{:16}: time: {:5.5}, logp: {:5.5}".format( "VITERBI", time.time() - tic, logp ))

2 changes: 1 addition & 1 deletion docs/callbacks.rst
@@ -63,6 +63,6 @@ The following callbacks are built in to pomegranate:
>>> from pomegranate import *
>>>
>>> def on_training_end(logs):
>>> print "Total Improvement: {:4.4}".format(logs['total_improvement'])
>>> print("Total Improvement: {:4.4}".format(logs['total_improvement']))
>>>
>>> HiddenMarkovModel.from_samples(X, callbacks=[LambdaCheckpoint(on_training_end=on_training_end)])
6 changes: 3 additions & 3 deletions docs/ooc.rst
@@ -78,18 +78,18 @@ This is a simple example with a simple distribution, but all models and model st
>>> model2 = model.copy()
>>>
>>> X = numpy.random.randint(2, size=(10000, 4))
->>> print model.states[0].distribution.equals( model2.states[0].distribution )
+>>> print(model.states[0].distribution.equals(model2.states[0].distribution))
True
>>> model.fit(X)
->>> print model.states[0].distribution.equals( model2.states[0].distribution )
+>>> print(model.states[0].distribution.equals(model2.states[0].distribution))
False
>>> model2.summarize(X[:2500])
>>> model2.summarize(X[2500:5000])
>>> model2.summarize(X[5000:7500])
>>> model2.summarize(X[7500:])
>>> model2.from_summaries()
>>>
->>> print model.states[0].distribution.equals( model2.states[0].distribution )
+>>> print(model.states[0].distribution.equals(model2.states[0].distribution))
True
We can see that before fitting to any data, the distribution in one of the states is equal for both models. After fitting the first model the distributions become different, as would be expected. After updating the second model through summarize and from_summaries they become equal again, showing that the out-of-core update recovers the exact batch update.
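
To make that equivalence concrete, here is a minimal sketch (not part of this commit) of the same summarize/from_summaries pattern applied to a single univariate distribution; the NormalDistribution, the synthetic data, and the batch boundaries are illustrative, assuming the Distribution-level API shown in the excerpt above.

>>> import numpy
>>> from pomegranate import NormalDistribution
>>>
>>> X = numpy.random.normal(7, 2, size=10000)
>>>
>>> # One distribution fit on the full dataset at once
>>> d1 = NormalDistribution(0, 1)
>>> d1.fit(X)
>>>
>>> # A second distribution updated out-of-core, one batch at a time
>>> d2 = NormalDistribution(0, 1)
>>> d2.summarize(X[:2500])
>>> d2.summarize(X[2500:5000])
>>> d2.summarize(X[5000:7500])
>>> d2.summarize(X[7500:])
>>> d2.from_summaries()
>>>
>>> # The batched update should recover the same parameters as the exact one
>>> print(numpy.allclose(d1.parameters, d2.parameters))
True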
6 changes: 3 additions & 3 deletions pomegranate/BayesianNetwork.pyx
@@ -184,9 +184,9 @@ cdef class BayesianNetwork(GraphModel):
>>> model.add_nodes([s1, s2])
>>> model.add_edge(s1, s2)
>>> model.bake()
->>> print model.log_probability(['A', 'B'])
+>>> print(model.log_probability(['A', 'B']))
-1.71479842809
->>> print model.predict_proba({'s2' : 'A'})
+>>> print(model.predict_proba({'s2' : 'A'}))
array([ {
"frozen" :false,
"class" :"Distribution",
@@ -209,7 +209,7 @@ cdef class BayesianNetwork(GraphModel):
],
"name" :"DiscreteDistribution"
}], dtype=object)
->>> print model.impute([[None, 'A']])
+>>> print(model.impute([[None, 'A']]))
[['B', 'A']]
"""

12 changes: 2 additions & 10 deletions pomegranate/distributions/DiscreteDistribution.pyx
@@ -5,7 +5,6 @@
# Contact: Jacob Schreiber <jmschreiber91@gmail.com>

import numpy
-import sys
import itertools as it
import json
import random
@@ -26,13 +25,6 @@ DEF NEGINF = float("-inf")
DEF INF = float("inf")
eps = numpy.finfo(numpy.float64).eps

-if sys.version_info[0] > 2:
-    # Set up for Python 3
-    xrange = range
-    izip = zip
-else:
-    izip = it.izip

cdef class DiscreteDistribution(Distribution):
"""
A discrete distribution, made up of characters and their probabilities,
@@ -236,7 +228,7 @@ cdef class DiscreteDistribution(Distribution):

self.summaries[1] += weights.sum()
characters = self.summaries[0]
-for i in xrange(len(items)):
+for i in range(len(items)):
characters[items[i]] += weights[i]

cdef double _summarize(self, double* items, double* weights, int n,
@@ -337,7 +329,7 @@ cdef class DiscreteDistribution(Distribution):
Xs = {}
total = 0

-for X, weight in izip(items, weights):
+for X, weight in zip(items, weights):
if _check_nan(X):
continue

2 changes: 1 addition & 1 deletion pomegranate/distributions/GammaDistribution.pyx
@@ -196,7 +196,7 @@ cdef class GammaDistribution(Distribution):
scipy.special.polygamma(0, shape) -
statistic) / (1.0 / shape - scipy.special.polygamma(1, shape))

-#print new_shape, scipy.special.polygamma(1, shape)
+#print(new_shape, scipy.special.polygamma(1, shape))

# Don't let shape escape from valid values
if abs(new_shape) == float("inf") or new_shape == 0:
2 changes: 0 additions & 2 deletions pomegranate/hmm.pyx
@@ -4,8 +4,6 @@
# Authors: Jacob Schreiber <jmschreiber91@gmail.com>
# Adam Novak <anovak1@ucsc.edu>

-from __future__ import print_function

from libc.math cimport exp as cexp
from operator import attrgetter
import math, random, itertools as it, sys, json
