Skip to content

Commit

Permalink
update train to remove merge errors
Browse files Browse the repository at this point in the history
  • Loading branch information
cbjones committed Mar 4, 2020
1 parent 3d85587 commit f17d488
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 37 deletions.
19 changes: 7 additions & 12 deletions lapart/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,26 +10,21 @@
import numpy as np
import pandas as pd

<<<<<<< HEAD
from .art import ART
=======

from lapart import art
>>>>>>> 6b35226651aa8b96c639cbefdbd644f985e7f011

def norm(data, ma, mi):
    """Min-max normalize each column of *data*.

    Parameters
    ----------
    data : 2-D numpy array of raw values.
    ma, mi : per-column maxima and minima (indexable by column).

    Returns
    -------
    2-D numpy array where each entry is (x - mi[j]) / (ma[j] - mi[j]).
    """
    n_rows = len(data)
    n_cols = len(data[0])
    tnorm = np.ones((n_rows, n_cols))
    for row in range(n_rows):
        for col in range(n_cols):
            span = ma[col] - mi[col]
            tnorm[row, col] = (data[row, col] - mi[col]) / span
    return tnorm

def dnorm(data, ma, mi):
    """Invert min-max normalization, mapping each column back to its raw scale.

    Parameters
    ----------
    data : 2-D numpy array of values in the normalized [0, 1] range.
    ma, mi : per-column maxima and minima used by the original normalization.

    Returns
    -------
    2-D numpy array where each entry is x * (ma[j] - mi[j]) + mi[j].
    """
    # The original text contained the function body twice (merge-diff
    # residue, including unreachable code after the return); this is the
    # single de-duplicated definition.
    out = np.ones((len(data), len(data[0])))
    for i in range(len(data)):
        for j in range(len(data[0])):
            out[i, j] = (data[i, j] * (ma[j] - mi[j])) + mi[j]
    return out

class test:

Expand Down Expand Up @@ -89,7 +84,7 @@ def lapart_test(self,xA):
for j in range(0,self.nAB):

''' Present Inputs to A Side Templates '''
cmax,ch = art.ART(self.IA,self.TA,self.mA,self.chAm,self.ncA,self.minAm,self.rhoA,self.beta,j)
cmax,ch = ART(self.IA,self.TA,self.mA,self.chAm,self.ncA,self.minAm,self.rhoA,self.beta,j)

if cmax == -1:
''' A Templates do not resonate '''
Expand Down
38 changes: 13 additions & 25 deletions lapart/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,29 +13,18 @@
from .art import ART

def norm(data, ma, mi):
    """Min-max normalize each column of *data*.

    Parameters
    ----------
    data : 2-D numpy array of raw values.
    ma, mi : per-column maxima and minima (indexable by column).

    Returns
    -------
    2-D numpy array where each entry is (x - mi[j]) / (ma[j] - mi[j]).
    """
    # The commented-out older implementation and the dead hstack-based
    # build (which would have prepended a spurious column of ones and was
    # then unconditionally overwritten) have been removed; only the live
    # nested-loop computation is kept.
    tnorm = np.ones((len(data), len(data[0])))
    for i in range(len(data)):
        for j in range(len(data[0])):
            tnorm[i, j] = (data[i, j] - mi[j]) / (ma[j] - mi[j])
    return tnorm

def dnorm(data, ma, mi):
    """Invert min-max normalization, mapping each column back to its raw scale.

    Parameters
    ----------
    data : 2-D numpy array of values in the normalized [0, 1] range.
    ma, mi : per-column maxima and minima used by the original normalization.

    Returns
    -------
    2-D numpy array where each entry is x * (ma[j] - mi[j]) + mi[j].
    """
    # The original text contained the function body twice (merge-diff
    # residue, including unreachable code after the return); this is the
    # single de-duplicated definition.
    out = np.ones((len(data), len(data[0])))
    for i in range(len(data)):
        for j in range(len(data[0])):
            out[i, j] = (data[i, j] * (ma[j] - mi[j])) + mi[j]
    return out

class train:

Expand Down Expand Up @@ -88,7 +77,6 @@ def __init__(self,xA,xB,rhoA,rhoB,beta,alpha,nep,TA,TB,L,memory_folder,update_te
self.TB = np.ones((len(self.IB),1))
self.L = np.zeros((len(self.IA[0]),len(self.IB[0])))

print(len(xA[0])*2)
self.minA = np.ones((len(xA[0])*2,1))
self.chAm = np.zeros((len(xA)*10,1))
self.mA = np.zeros((len(xA)*10,1))
Expand Down Expand Up @@ -155,7 +143,7 @@ def lapart_train(self,xA,xB):
for ep in range(self.nep):
for j in range(self.nAB):

cmaxA, chA = art.ART(self.IA,self.TA,self.mA,self.chAm,self.ncA,self.minA,self.rhoA,self.beta,j)
cmaxA, chA = ART(self.IA,self.TA,self.mA,self.chAm,self.ncA,self.minA,self.rhoA,self.beta,j)

if cmaxA == -1:

Expand All @@ -167,7 +155,7 @@ def lapart_train(self,xA,xB):

self.ncA += 1
self.TA = self.CreateTemplate(self.IA,self.TA,self.ncA,j)
cmaxB, chB = art.ART(self.IB,self.TB,self.mB,self.chBm,self.ncB,self.minB,self.rhoB,self.beta,j)
cmaxB, chB = ART(self.IB,self.TB,self.mB,self.chBm,self.ncB,self.minB,self.rhoB,self.beta,j)

if cmaxB == -1:
self.ncB += 1
Expand All @@ -188,7 +176,7 @@ def lapart_train(self,xA,xB):
Present B-Side input and Prime B-Side
Prime = B-Side must consider template associated with A-Side Template
'''
cmaxB, chB = art.ART(self.IB,self.TB,self.mB,self.chBm,self.ncB,self.minB,self.rhoB,self.beta,j)
cmaxB, chB = ART(self.IB,self.TB,self.mB,self.chBm,self.ncB,self.minB,self.rhoB,self.beta,j)

if cmaxB == -1:
'''
Expand Down

0 comments on commit f17d488

Please sign in to comment.