Commit c9df64d
Merge 9174ecc into 229d5dd
kiudee committed Feb 3, 2020
2 parents: 229d5dd + 9174ecc
Showing 10 changed files with 19 additions and 15 deletions.
2 changes: 2 additions & 0 deletions .coveragerc
@@ -0,0 +1,2 @@
+[run]
+parallel = True
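The new .coveragerc enables coverage.py's parallel mode: each measured process writes to a uniquely suffixed data file (.coverage.<machine>.<pid>.<random>) instead of a shared .coverage, so concurrent test processes don't clobber each other's results; the files are merged afterwards with 'coverage combine'. Roughly the same thing via coverage.py's Python API, as a sketch:

    import coverage

    # data_suffix=True mirrors 'parallel = True' from .coveragerc.
    cov = coverage.Coverage(data_suffix=True)
    cov.start()
    # ... code under measurement ...
    cov.stop()
    cov.save()  # writes a per-process data file for a later 'coverage combine'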
2 changes: 1 addition & 1 deletion csrank/choicefunction/generalized_linear_model.py
@@ -203,7 +203,7 @@ def _fit(self, X, Y, sampler='variational', tune=500, draws=500,
     def _predict_scores_fixed(self, X, **kwargs):
         d = dict(pm.summary(self.trace)['mean'])
         intercept = 0.0
-        weights = np.array([d['weights__{}'.format(i)] for i in range(self.n_object_features)])
+        weights = np.array([d['weights[{}]'.format(i)] for i in range(self.n_object_features)])
         if 'intercept' in d:
             intercept = intercept + d['intercept']
         return np.dot(X, weights) + intercept
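All of the Python changes in this commit follow one pattern: the keys under which pm.summary() reports posterior means of array-valued variables change from 'weights__0' to 'weights[0]', in line with the pymc3>=3.8 bump below (newer pymc3 releases label summary rows in the bracketed style). A minimal sketch of a lookup that tolerates both label styles; the helper name and its fallback logic are illustrative, not part of this commit:

    import numpy as np

    def vector_from_summary(mean_series, name, size):
        # Newer pymc3 labels vector entries 'name[i]'; older releases used 'name__i'.
        d = dict(mean_series)
        keys = ['{}[{}]'.format(name, i) for i in range(size)]
        if keys[0] not in d:
            keys = ['{}__{}'.format(name, i) for i in range(size)]
        return np.array([d[k] for k in keys])

    # e.g. weights = vector_from_summary(pm.summary(trace)['mean'], 'weights', n_features)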
5 changes: 3 additions & 2 deletions csrank/dataset_reader/labelranking/survey_dataset_reader.py
@@ -3,7 +3,8 @@
 import numpy as np
 import pandas as pd
 from sklearn.model_selection import ShuffleSplit
-from sklearn.preprocessing import Imputer, StandardScaler
+from sklearn.preprocessing import StandardScaler
+from sklearn.impute import SimpleImputer
 from sklearn.utils import check_random_state
 
 from csrank.constants import LABEL_RANKING
@@ -28,7 +29,7 @@ def __load_dataset__(self):
             context_feature = [float(i) if i != '.' else np.NAN for i in row[13:33]]
             features.append(context_feature)
         X = np.array(features)
-        X = Imputer().fit_transform(X)
+        X = SimpleImputer().fit_transform(X)
         X = np.array([np.log(np.array(X[:, i]) + 1) for i in range(len(features[0]))])
         X = np.array(X.T)
         self.X = StandardScaler().fit_transform(X)
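Background: sklearn.preprocessing.Imputer was deprecated in scikit-learn 0.20 and removed in 0.22; sklearn.impute.SimpleImputer with its default strategy='mean' performs the same column-mean imputation of NaNs. A self-contained sketch (the data values are made up for illustration):

    import numpy as np
    from sklearn.impute import SimpleImputer

    X = np.array([[1.0, np.nan], [3.0, 4.0]])
    # Default strategy='mean' fills each NaN with its column mean, as Imputer() did.
    X = SimpleImputer().fit_transform(X)  # -> [[1., 4.], [3., 4.]]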
6 changes: 3 additions & 3 deletions csrank/discretechoice/generalized_nested_logit.py
@@ -283,11 +283,11 @@ def fit(self, X, Y, sampler='variational', tune=500, draws=500,

     def _predict_scores_fixed(self, X, **kwargs):
         mean_trace = dict(pm.summary(self.trace)['mean'])
-        weights = np.array([mean_trace['weights__{}'.format(i)] for i in range(self.n_object_features)])
-        lambda_k = np.array([mean_trace['lambda_k__{}'.format(i)] for i in range(self.n_nests)])
+        weights = np.array([mean_trace['weights[{}]'.format(i)] for i in range(self.n_object_features)])
+        lambda_k = np.array([mean_trace['lambda_k[{}]'.format(i)] for i in range(self.n_nests)])
         weights_ik = np.zeros((self.n_object_features, self.n_nests))
         for i, k in product(range(self.n_object_features), range(self.n_nests)):
-            weights_ik[i][k] = mean_trace['weights_ik__{}_{}'.format(i, k)]
+            weights_ik[i][k] = mean_trace['weights_ik[{},{}]'.format(i, k)]
         alpha_ik = np.dot(X, weights_ik)
         alpha_ik = npu.softmax(alpha_ik, axis=2)
         utility = np.dot(X, weights)
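Matrix-valued parameters are renamed analogously: the flat label 'weights_ik__0_1' becomes 'weights_ik[0,1]'. A sketch of a generic 2-D lookup under that assumption (the helper is illustrative, not part of the commit):

    import numpy as np
    from itertools import product

    def matrix_from_summary(mean_series, name, n_rows, n_cols):
        # Newer pymc3 labels 2-D entries 'name[i,j]' instead of 'name__i_j'.
        d = dict(mean_series)
        out = np.zeros((n_rows, n_cols))
        for i, j in product(range(n_rows), range(n_cols)):
            out[i, j] = d['{}[{},{}]'.format(name, i, j)]
        return out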
2 changes: 1 addition & 1 deletion csrank/discretechoice/mixed_logit_model.py
@@ -187,7 +187,7 @@ def _predict_scores_fixed(self, X, **kwargs):
         summary = dict(pm.summary(self.trace)['mean'])
         weights = np.zeros((self.n_object_features, self.n_mixtures))
         for i, k in product(range(self.n_object_features), range(self.n_mixtures)):
-            weights[i][k] = summary['weights__{}_{}'.format(i, k)]
+            weights[i][k] = summary['weights[{},{}]'.format(i, k)]
         utility = np.dot(X, weights)
         p = np.mean(npu.softmax(utility, axis=1), axis=2)
         return p
2 changes: 1 addition & 1 deletion csrank/discretechoice/multinomial_logit_model.py
@@ -181,7 +181,7 @@ def fit(self, X, Y, sampler='variational', tune=500, draws=500,
     def _predict_scores_fixed(self, X, **kwargs):
         d = dict(pm.summary(self.trace)['mean'])
         intercept = 0.0
-        weights = np.array([d['weights__{}'.format(i)] for i in range(self.n_object_features)])
+        weights = np.array([d['weights[{}]'.format(i)] for i in range(self.n_object_features)])
         if 'intercept' in d:
             intercept = intercept + d['intercept']
         return np.dot(X, weights) + intercept
6 changes: 3 additions & 3 deletions csrank/discretechoice/nested_logit_model.py
@@ -342,9 +342,9 @@ def fit(self, X, Y, sampler='variational', tune=500, draws=500,
     def _predict_scores_fixed(self, X, **kwargs):
         y_nests = self.create_nests(X)
         mean_trace = dict(pm.summary(self.trace)['mean'])
-        weights = np.array([mean_trace['weights__{}'.format(i)] for i in range(self.n_object_features)])
-        weights_k = np.array([mean_trace['weights_k__{}'.format(i)] for i in range(self.n_object_features)])
-        lambda_k = np.array([mean_trace['lambda_k__{}'.format(i)] for i in range(self.n_nests)])
+        weights = np.array([mean_trace['weights[{}]'.format(i)] for i in range(self.n_object_features)])
+        weights_k = np.array([mean_trace['weights_k[{}]'.format(i)] for i in range(self.n_object_features)])
+        lambda_k = np.array([mean_trace['lambda_k[{}]'.format(i)] for i in range(self.n_nests)])
         weights = (weights / lambda_k[:, None])
         utility_k = np.dot(self.features_nests, weights_k)
         utility = self._eval_utility_np(X, y_nests, weights)
4 changes: 2 additions & 2 deletions csrank/discretechoice/paired_combinatorial_logit.py
@@ -280,8 +280,8 @@ def fit(self, X, Y, sampler='variational', tune=500, draws=500,

     def _predict_scores_fixed(self, X, **kwargs):
         mean_trace = dict(pm.summary(self.trace)['mean'])
-        weights = np.array([mean_trace['weights__{}'.format(i)] for i in range(self.n_object_features)])
-        lambda_k = np.array([mean_trace['lambda_k__{}'.format(i)] for i in range(self.n_nests)])
+        weights = np.array([mean_trace['weights[{}]'.format(i)] for i in range(self.n_object_features)])
+        lambda_k = np.array([mean_trace['lambda_k[{}]'.format(i)] for i in range(self.n_nests)])
         utility = np.dot(X, weights)
         p = self._get_probabilities_np(utility, lambda_k)
         return p
2 changes: 1 addition & 1 deletion requirements.txt
@@ -10,7 +10,7 @@ docopt>=0.6.0
 joblib>=0.9.4
 tqdm>=4.11.2
 keras>=2.3
-pymc3>=3.5
+pymc3>=3.8
 theano>=1.0
 # Pick either CPU or GPU version of tensorflow:
 tensorflow>=1.5,<2.0
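The pymc3>=3.8 floor is what makes the bracketed summary labels above safe to rely on; with an older pymc3 the new key lookups would raise KeyError. A guard one could add in user code (illustrative only, not part of this commit):

    import pymc3
    from distutils.version import LooseVersion

    # The 'weights[0]'-style labels assumed above require a sufficiently new pymc3.
    assert LooseVersion(pymc3.__version__) >= LooseVersion('3.8'), pymc3.__version__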
3 changes: 2 additions & 1 deletion scripts/create_testenv.sh
@@ -22,7 +22,8 @@ then
 fi
 
 
-conda install --yes numpy scipy joblib pytest pytest-cov coverage scikit-learn pandas h5py seaborn mkl-service
+conda install --yes numpy scipy joblib pytest pytest-cov pandas h5py seaborn mkl-service
+conda install --yes -c conda-forge coverage=5.0.3
 pip install sphinx sphinx-autobuild nbsphinx pandoc ipykernel bottleneck sphinx_rtd_theme notebook sphinxcontrib-bibtex
 pip install --no-cache-dir --ignore-installed -r requirements.txt
 
