Merge d398793 into d8bc3d8
ljchang committed Nov 27, 2017
2 parents d8bc3d8 + d398793 commit 9f135e5
Showing 2 changed files with 50 additions and 55 deletions.
20 changes: 8 additions & 12 deletions .travis.yml
@@ -1,19 +1,15 @@
language: python

sudo: false

python:
- 2.7
- 3.6

env:
global:
- CONDA_DEPS="pip pytest numpy pandas scipy matplotlib seaborn"

matrix:
include:
# - os: linux
# env:
# - PYTHON_VERSION=3.5
# - MINICONDA_URL="https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh"
- os: linux
env:
- PYTHON_VERSION=2.7
- MINICONDA_URL="https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh"
- MINICONDA_URL="https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh"

before_install:
- export MINICONDA=$HOME/miniconda
@@ -26,7 +22,7 @@ before_install:
- conda update conda
- conda info -a
- conda config --add channels conda-forge
- - conda install python=$PYTHON_VERSION $CONDA_DEPS
+ - conda install python=$TRAVIS_PYTHON_VERSION $CONDA_DEPS

install:
- pip install -r requirements.txt
85 changes: 42 additions & 43 deletions emotioncf/tests/test_core.py
@@ -8,15 +8,15 @@
def simulate_data(data_type = 'data_long'):
i = 100
s = 50
- rat = np.random.rand(s,i)*50
- for x in np.arange(0,rat.shape[1],5):
- rat[0:s/2,x] = rat[0:s/2,x] + x
- for x in np.arange(0,rat.shape[1],3):
- rat[(s/2):s,x] = rat[(s/2):s,x] + x
- rat[(s/2):s] = rat[(s/2):s,::-1]
+ rat = np.random.rand(s, i)*50
+ for x in np.arange(0, rat.shape[1], 5):
+ rat[0:int(s/2), x] = rat[0:int(s/2), x] + x
+ for x in np.arange(0, rat.shape[1], 3):
+ rat[int(s/2):s, x] = rat[int(s/2):s, x] + x
+ rat[int(s/2):s] = rat[int(s/2):s, ::-1]
rat = pd.DataFrame(rat)
if data_type is 'data_long':
- out = pd.DataFrame(columns=['Subject','Item','Rating'])
+ out = pd.DataFrame(columns=['Subject', 'Item', 'Rating'])
for row in rat.iterrows():
sub = pd.DataFrame(columns=out.columns)
sub['Rating'] = row[1]
@@ -28,14 +28,14 @@ def simulate_data(data_type = 'data_long'):
return rat
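The int() casts above are the substance of this change: under Python 2, s/2 is integer division, but under Python 3 it yields a float, which NumPy rejects as a slice bound. A minimal standalone sketch of the failure mode and the fix (illustrative only, reusing the test's s and i):

import numpy as np

s, i = 50, 100
rat = np.random.rand(s, i) * 50

half = s / 2               # Python 2: 25 (int); Python 3: 25.0 (float)
# rat[0:half, 0]           # fails on Python 3: float slice bounds are rejected

rat[0:int(s / 2), 0] += 1  # explicit cast, as the updated test does
rat[0:s // 2, 0] += 1      # floor division also works on both 2 and 3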

def basecf_method_test(cf=None, data=None):
- assert cf.train_mask.shape == (50,100)
- assert cf.predicted_ratings.shape == (50,100)
+ assert cf.train_mask.shape == (50, 100)
+ assert cf.predicted_ratings.shape == (50, 100)
mse = cf.get_mse(data=data)
r = cf.get_corr(data=data)
sub_r = cf.get_sub_corr(data=data)
sub_mse = cf.get_sub_mse(data=data)
- assert isinstance(mse,float)
- assert isinstance(r,float)
+ assert isinstance(mse, float)
+ assert isinstance(r, float)
assert isinstance(sub_r,np.ndarray)
assert len(sub_r) == cf.ratings.shape[0]
assert isinstance(sub_mse,np.ndarray)
@@ -47,7 +47,7 @@ def basecf_method_test(cf=None, data=None):
print(('mse: %s') % mse)
print(('r: %s') % r)
print(('mean sub r: %s') % np.mean(sub_r))

df = cf.to_long_df()
assert isinstance(df,pd.DataFrame)
if cf.is_predict:
@@ -57,18 +57,18 @@ def basecf_method_test(cf=None, data=None):
assert df.shape[0] == cf.ratings.shape[0]*cf.ratings.shape[1]*2
if cf.is_mask:
assert 'Mask' in df.columns

def basecf_method_all_tests(cf=None):
cf.plot_predictions()
basecf_method_test(cf=cf, data='all')
basecf_method_test(cf=cf, data='train')
basecf_method_test(cf=cf, data='test')

def test_create_sub_by_item_matrix():
rating = create_sub_by_item_matrix(simulate_data(data_type='data_long'))
assert isinstance(rating,pd.DataFrame)
assert rating.shape == (50,100)
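The test above checks create_sub_by_item_matrix only through its output type and shape; assuming it behaves like a standard long-to-wide pivot over the Subject, Item, and Rating columns, a rough equivalent would be (the helper name here is illustrative, not a package function):

import pandas as pd

def sub_by_item_pivot(df_long):
    # Hypothetical stand-in for create_sub_by_item_matrix: one row per
    # subject, one column per item, cells hold the ratings.
    return df_long.pivot(index='Subject', columns='Item', values='Rating')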

def test_cf_mean():
cf = Mean(simulate_data(data_type='data_wide'))
cf.fit()
@@ -81,64 +81,64 @@ def test_cf_mean():
cf.fit(dilate_ts_n_samples=2)
cf.predict()
basecf_method_all_tests(cf=cf)
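The Mean model is exercised only through the shared assertions; assuming it predicts each item as the average rating over the training-masked subjects, a minimal sketch of that baseline (not the package's actual code) is:

import numpy as np

def mean_predict(ratings, train_mask):
    # ratings: subjects x items array; train_mask: True where a rating is in the training set
    masked = np.where(train_mask, ratings, np.nan)
    item_means = np.nanmean(masked, axis=0)             # per-item mean over training ratings
    return np.tile(item_means, (ratings.shape[0], 1))   # identical prediction for every subject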


def test_cf_knn():
cf = KNN(simulate_data(data_type='data_wide'))
cf.fit(metric='correlation')
cf.predict()

cf.split_train_test(n_train_items=50)
cf.fit()
cf.predict()
basecf_method_all_tests(cf=cf)

cf.fit(metric='correlation')
cf.predict(k=10)
basecf_method_test(cf=cf, data='all')

cf.fit()
cf.predict(k=10)
basecf_method_all_tests(cf=cf)

- cf.fit(dilate_ts_n_samples=2,metric='correlation')
+ cf.fit(dilate_ts_n_samples=2, metric='correlation')
cf.predict()
basecf_method_all_tests(cf=cf)

cf.fit(metric='cosine')
cf.predict(k=10)
basecf_method_test(cf=cf, data='all')

cf.fit()
cf.predict(k=10)
basecf_method_all_tests(cf=cf)

- cf.fit(dilate_ts_n_samples=2,metric='cosine')
+ cf.fit(dilate_ts_n_samples=2, metric='cosine')
cf.predict()
basecf_method_all_tests(cf=cf)
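These calls run KNN with correlation and cosine similarity and a k=10 neighborhood; a rough user-based sketch of that kind of prediction (an assumed form with an illustrative helper name, not necessarily the package's exact weighting):

import numpy as np

def knn_predict(ratings, k=10):
    # ratings: subjects x items array; similarity = Pearson correlation between subjects
    sim = np.corrcoef(ratings)
    preds = np.zeros_like(ratings, dtype=float)
    for s in range(ratings.shape[0]):
        order = np.argsort(sim[s])[::-1]
        neighbors = order[order != s][:k]               # k most similar other subjects
        w = sim[s, neighbors]
        preds[s] = w @ ratings[neighbors] / np.abs(w).sum()
    return preds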

def test_cf_knn_dil():
cf = KNN(simulate_data(data_type='data_wide'))
cf.split_train_test(n_train_items=20)
- cf.fit(dilate_ts_n_samples=2,metric='correlation')
+ cf.fit(dilate_ts_n_samples=2, metric='correlation')
cf.predict()
basecf_method_all_tests(cf=cf)

- cf.fit(dilate_ts_n_samples=2,metric='cosine')
+ cf.fit(dilate_ts_n_samples=2, metric='cosine')
cf.predict()
basecf_method_all_tests(cf=cf)

def test_cf_nnmf_multiplicative():
cf = NNMF_multiplicative(simulate_data(data_type='data_wide'))
cf.fit()
cf.predict()
cf.split_train_test(n_train_items=50)
cf.fit()
basecf_method_all_tests(cf=cf)

cf.fit(dilate_ts_n_samples=2)
cf.predict()
- basecf_method_all_tests(cf=cf)
+ basecf_method_all_tests(cf=cf)

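NNMF_multiplicative is covered above only through fit/predict and the shared assertions; the classic Lee-Seung multiplicative updates it presumably builds on look like this sketch (ignoring how the package masks missing or held-out ratings):

import numpy as np

def nnmf_multiplicative_step(V, W, H, eps=1e-9):
    # V: subjects x items, W: subjects x k, H: k x items; all non-negative
    H *= (W.T @ V) / (W.T @ W @ H + eps)
    W *= (V @ H.T) / (W @ H @ H.T + eps)
    return W, H
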
def test_cf_nnmf_sgd():
cf = NNMF_sgd(simulate_data(data_type='data_wide'))
@@ -149,7 +149,7 @@ def test_cf_nnmf_sgd():
item_bias_reg=0,
learning_rate=.001)
cf.predict()

cf.split_train_test(n_train_items=50)
cf.fit(n_iterations = 20,
user_fact_reg=0,
@@ -158,7 +158,7 @@ def test_cf_nnmf_sgd():
item_bias_reg=0,
learning_rate=.001)
basecf_method_all_tests(cf=cf)

cf.fit(n_iterations = 20,
user_fact_reg=0,
item_fact_reg=0,
@@ -167,24 +167,23 @@ def test_cf_nnmf_sgd():
learning_rate=.001,
dilate_ts_n_samples=2)
cf.predict()
- basecf_method_all_tests(cf=cf)
+ basecf_method_all_tests(cf=cf)

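The fit() calls above expose the NNMF_sgd hyperparameters (per-term regularization, learning rate, iteration count); a single stochastic update of the usual biased matrix-factorization form that these parameters would control looks roughly like this (an assumed form, not the package's exact code):

def sgd_update(U, V, b_u, b_i, mu, u, i, r, learning_rate=.001,
               user_fact_reg=0., item_fact_reg=0.,
               user_bias_reg=0., item_bias_reg=0.):
    # U: users x k factors, V: items x k factors, b_u/b_i: bias vectors, mu: global mean.
    # One update for an observed rating r by user u on item i.
    err = r - (mu + b_u[u] + b_i[i] + U[u] @ V[i])
    b_u[u] += learning_rate * (err - user_bias_reg * b_u[u])
    b_i[i] += learning_rate * (err - item_bias_reg * b_i[i])
    u_row = U[u].copy()                      # keep pre-update user factors
    U[u] += learning_rate * (err * V[i] - user_fact_reg * U[u])
    V[i] += learning_rate * (err * u_row - item_fact_reg * V[i])
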
def test_downsample():
cf = Mean(simulate_data(data_type = 'data_wide'))
- cf.downsample(sampling_freq=10,target=2, target_type='samples')
+ cf.downsample(sampling_freq=10, target=2, target_type='samples')
assert cf.ratings.shape == (50,50)
cf = Mean(simulate_data(data_type = 'data_wide'))
- cf.downsample(sampling_freq=10,target=5, target_type='hz')
- assert cf.ratings.shape == (50,50)
+ cf.downsample(sampling_freq=10, target=5, target_type='hz')
+ assert cf.ratings.shape == (50, 50)
cf = Mean(simulate_data(data_type = 'data_wide'))
- cf.downsample(sampling_freq=10,target=2, target_type='seconds')
- assert cf.ratings.shape == (50,5)
+ cf.downsample(sampling_freq=10, target=2, target_type='seconds')
+ assert cf.ratings.shape == (50, 5)
cf = Mean(simulate_data(data_type = 'data_wide'))
cf.split_train_test(n_train_items=20)
cf.fit()
cf.predict()
cf.downsample(sampling_freq=10,target=2, target_type='samples')
- assert cf.ratings.shape == (50,50)
- assert cf.train_mask.shape == (50,50)
- assert cf.predicted_ratings.shape == (50,50)

+ assert cf.ratings.shape == (50, 50)
+ assert cf.train_mask.shape == (50, 50)
+ assert cf.predicted_ratings.shape == (50, 50)

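The expected shapes in test_downsample follow from the simulated 50 x 100 matrix treated as 100 samples at 10 Hz, assuming downsample() averages over fixed-size bins along the time axis:

n_samples, sampling_freq = 100, 10
assert n_samples // 2 == 50                       # target=2, target_type='samples': bins of 2
assert n_samples // (sampling_freq // 5) == 50    # target=5, target_type='hz': bins of 10/5 = 2
assert n_samples // (2 * sampling_freq) == 5      # target=2, target_type='seconds': bins of 2*10 = 20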