Merge branch 'feature/#17-ReST-based_automatic_documentation'
inureyes committed Jul 7, 2017
2 parents 11fe723 + a618002 commit f35743e
Showing 3 changed files with 65 additions and 10 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -11,6 +11,7 @@ __pycache__/
 *.pyc
 *.pyo
 .*.swp
+.idea

 # virtualenv
 python/venv*/
62 changes: 56 additions & 6 deletions python/wtte/weibull.py
@@ -1,8 +1,15 @@
+"""
+Wrapper for Python Weibull functions
+"""
 import numpy as np
-# Python weibull functions


 def cumulative_hazard(t, a, b):
+    """ Cumulative hazard
+    :param t: Value
+    :param a: Alpha
+    :param b: Beta
+    :return: `np.power(t / a, b)`
+    """
     t = np.double(t)
     return np.power(t / a, b)

@@ -13,21 +20,49 @@ def hazard(t, a, b):


 def cdf(t, a, b):
+    """ Cumulative distribution function
+    :param t: Value
+    :param a: Alpha
+    :param b: Beta
+    :return: `1 - np.exp(-np.power(t / a, b))`
+    """
     t = np.double(t)
     return 1 - np.exp(-np.power(t / a, b))


 def pdf(t, a, b):
+    """ Probability density function
+    :param t: Value
+    :param a: Alpha
+    :param b: Beta
+    :return: `(b / a) * np.power(t / a, b - 1) * np.exp(-np.power(t / a, b))`
+    """
     t = np.double(t)
     return (b / a) * np.power(t / a, b - 1) * np.exp(-np.power(t / a, b))


 def cmf(t, a, b):
+    """ Cumulative mass function
+    :param t: Value
+    :param a: Alpha
+    :param b: Beta
+    :return: `cdf(t + 1, a, b)`
+    """
     t = np.double(t) + 1e-35
     return cdf(t + 1, a, b)


 def pmf(t, a, b):
+    """ Probability mass function
+    :param t: Value
+    :param a: Alpha
+    :param b: Beta
+    :return: `cdf(t + 1.0, a, b) - cdf(t, a, b)`
+    """
     t = np.double(t) + 1e-35
     return cdf(t + 1.0, a, b) - cdf(t, a, b)

@@ -41,19 +76,34 @@ def mode(a, b):


 def mean(a, b):
-    # Continuous mean. at most 1 step below discretized mean
-    # E[T ] <= E[Td] + 1 true for positive distributions.
+    """ Continuous mean. At most 1 step below the discretized mean:
+    `E[T] <= E[Td] + 1` holds for positive distributions.
+    """
     from scipy.special import gamma
     return a * gamma(1.0 + 1.0 / b)


 def quantiles(a, b, p):
+    """ Quantiles
+    :param a: Alpha
+    :param b: Beta
+    :param p: Quantile level (probability between 0 and 1)
+    :return: `a * np.power(-np.log(1.0 - p), 1.0 / b)`
+    """
     return a * np.power(-np.log(1.0 - p), 1.0 / b)


 def mean(a, b):
-    # Continuous mean. Theoretically at most 1 step below discretized mean
-    # E[T ] <= E[Td] + 1 true for positive distributions.
+    """ Continuous mean. Theoretically at most 1 step below the discretized
+    mean: `E[T] <= E[Td] + 1` holds for positive distributions.
+    :param a: Alpha
+    :param b: Beta
+    :return: `a * gamma(1.0 + 1.0 / b)`
+    """
     from scipy.special import gamma
     return a * gamma(1.0 + 1.0 / b)

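For orientation, and not part of this commit: a minimal sketch of the documented helpers in use, assuming the package is importable as `wtte.weibull` per the repository layout above (values in the comments are approximate).

    from wtte import weibull

    a, b = 2.0, 1.5  # alpha = scale, beta = shape

    print(weibull.cdf(2.0, a, b))        # 1 - exp(-(2/2)**1.5) = 1 - e**-1, ~0.632
    print(weibull.pmf(3, a, b))          # discrete mass: cdf(4, a, b) - cdf(3, a, b)
    print(weibull.quantiles(a, b, 0.5))  # median: 2 * log(2)**(1/1.5), ~1.57
    print(weibull.mean(a, b))            # 2 * gamma(1 + 1/1.5), ~1.81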
12 changes: 8 additions & 4 deletions python/wtte/wtte.py
@@ -27,6 +27,7 @@ def output_lambda(x, init_alpha=1.0, max_beta_value=5.0, max_alpha_value=None):
     """Elementwise (Lambda) computation of alpha and regularized beta.

     - Alpha:
         (activation)
         Exponential units seem to give faster training than
         the original paper's softplus units. Makes sense due to logarithmic
Expand All @@ -47,7 +48,8 @@ def output_lambda(x, init_alpha=1.0, max_beta_value=5.0, max_alpha_value=None):
     (regularization) Use max_beta_value to implicitly regularize the model
     (initialization) Fixed to begin moving slowly around 1.0

-    Assumes tensorflow backend.
+    .. note::
+        Assumes `tensorflow` backend.

     :param x: tensor with last dimension having length 2 with x[...,0] = alpha, x[...,1] = beta
     :param init_alpha: initial value of `alpha`. Default value is 1.0.
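As an aside, not part of the diff: a plain-NumPy sketch of the activation scheme this docstring describes. The exponential and shifted-sigmoid forms below are assumptions inferred from the text above ("exponential units", `max_beta_value`, "begin moving slowly around 1.0"), not a copy of `output_lambda` itself.

    import numpy as np

    def sketch_output_lambda(x, init_alpha=1.0, max_beta_value=5.0):
        # Assumed layout: last axis holds the raw (alpha, beta) pair.
        a_raw, b_raw = x[..., 0], x[..., 1]
        # (activation) exponential units keep alpha positive, equal to
        # init_alpha for zero input
        alpha = init_alpha * np.exp(a_raw)
        # (regularization) a sigmoid caps beta below max_beta_value; the shift
        # makes a zero-initialized network start with beta at exactly 1.0
        shift = np.log(max_beta_value - 1.0)
        beta = max_beta_value / (1.0 + np.exp(-(b_raw - shift)))
        return np.stack([alpha, beta], axis=-1)

    print(sketch_output_lambda(np.zeros((1, 2))))  # [[1. 1.]] at initialization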
@@ -95,9 +97,6 @@ class output_activation(object):
     Wrapper

-    - Usage

     :Example:

     .. code-block:: python

         wtte_activation = wtte.output_activation(init_alpha=1.,
@@ -113,6 +112,11 @@ def __init__(self, init_alpha=1.0, max_beta_value=5.0):
         self.max_beta_value = max_beta_value

     def activation(self, ab):
+        """ (Internal function) Activation wrapper
+
+        :param ab: original tensor with alpha and beta.
+        :return ab: return of `output_lambda` with `init_alpha` and `max_beta_value`.
+        """
         ab = output_lambda(ab, init_alpha=self.init_alpha,
                            max_beta_value=self.max_beta_value)

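Again outside the diff: what the wrapper looks like in use, extending the truncated `:Example:` block above. The surrounding Keras layers, their sizes, and the `max_beta_value` argument shown here are assumptions, not taken from this commit.

    from keras.models import Sequential
    from keras.layers import Activation, Dense
    import wtte.wtte as wtte

    model = Sequential()
    model.add(Dense(2, input_dim=10))  # the network must end in 2 outputs: alpha, beta
    model.add(Activation(wtte.output_activation(init_alpha=1.0,
                                                max_beta_value=4.0).activation))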
