Skip to content

Commit

Permalink
version 0.1-3
Browse files Browse the repository at this point in the history
  • Loading branch information
jdonaldson authored and cran-robot committed Jul 15, 2016
1 parent 5864315 commit 0d67ff8
Show file tree
Hide file tree
Showing 7 changed files with 34 additions and 34 deletions.
13 changes: 8 additions & 5 deletions DESCRIPTION
@@ -1,13 +1,16 @@
Package: tsne
Type: Package
Title: T-distributed Stochastic Neighbor Embedding for R (t-SNE)
Version: 0.1-2
Date: 2010-02-19
Title: T-Distributed Stochastic Neighbor Embedding for R (t-SNE)
Version: 0.1-3
Date: 2016-06-04
Author: Justin Donaldson <jdonaldson@gmail.com>
Maintainer: Justin Donaldson <jdonaldson@gmail.com>
Description: A "pure R" implementation of the t-SNE algorithm.
License: GPL
LazyLoad: yes
Packaged: 2012-05-01 16:27:35 UTC; justindonaldson
NeedsCompilation: no
URL: https://github.com/jdonaldson/rtsne/
BugReports: https://github.com/jdonaldson/rtsne/issues
Packaged: 2016-07-15 15:40:42 UTC; jdonaldson
Repository: CRAN
Date/Publication: 2012-05-02 06:34:28
Date/Publication: 2016-07-15 20:02:16
12 changes: 6 additions & 6 deletions MD5
@@ -1,7 +1,7 @@
c0b2fb9370262ac6c5a189ea2aab6606 *DESCRIPTION
8b54e5a89fbda3af5e077053d40bec76 *NAMESPACE
c62fd78c2dbd326aee4a0f43037d2449 *NEWS
89ec2b9a3dc063fc9141420faf288338 *DESCRIPTION
d65aaf552fc71192a426c3b41f3b0c48 *NAMESPACE
a63efcd4c418f6199317015c874204de *NEWS
5b83328297146a191456b5e2299c5123 *R/tsne-internal.R
13e8ed8f8f943eaa5731f88b11ba8148 *R/tsne.R
1bb74773e41979acecc6c87375bef147 *man/tsne-package.Rd
42113a4da50526fa264e890b27861e90 *man/tsne.Rd
a444f7bd180097f3bb7f3b1b61d77d43 *R/tsne.R
bb49103a06187a42f001e2671f2072f7 *man/tsne-package.Rd
b085bd2cdbbc2f88aab4172f4312be83 *man/tsne.Rd
1 change: 1 addition & 0 deletions NAMESPACE
@@ -1 +1,2 @@
exportPattern("^[[:alpha:]]+")
importFrom("stats", "dist", "rnorm")
3 changes: 2 additions & 1 deletion NEWS
@@ -1,3 +1,4 @@
* version 0.1 - Initial release. main tsne function included.
* version 0.1-1 - fixed misc. documentation omissions.
* version 0.1-2 - using an initial configuration parameter automatically places the tsne in the 'late stage' mode, which is mainly focused on small scale adjustments.
* version 0.1-2 - using an initial configuration parameter automatically places the tsne in the 'late stage' mode, which is mainly focused on small scale adjustments.
* version 0.1-3 - fixed a broken gain equation, and made distance matrix checks more reliable
27 changes: 10 additions & 17 deletions R/tsne.R
@@ -1,6 +1,6 @@
tsne <-
function(X, initial_config = NULL, k=2, initial_dims=30, perplexity=30, max_iter = 1000, min_cost=0, epoch_callback=NULL,whiten=TRUE, epoch=100 ){
if (class(X) == 'dist') {
if ('dist' %in% class(X)) {
n = attr(X,'Size')
}
else {
Expand All @@ -22,32 +22,28 @@ function(X, initial_config = NULL, k=2, initial_dims=30, perplexity=30, max_iter

eps = 2^(-52) # typical machine precision

if (!is.null(initial_config) && is.matrix(initial_config)) {
if (!is.null(initial_config) && is.matrix(initial_config)) {
if (nrow(initial_config) != n | ncol(initial_config) != k){
stop('initial_config argument does not match necessary configuration for X')
}
ydata = initial_config
initial_P_gain = 1

} else {
ydata = matrix(rnorm(k * n),n)
}

P = .x2p(X,perplexity, 1e-5)$P
# P[is.nan(P)]<-eps
P = .5 * (P + t(P))

P[P < eps]<-eps
P = P/sum(P)



P = P * initial_P_gain
grads = matrix(0,nrow(ydata),ncol(ydata))
incs = matrix(0,nrow(ydata),ncol(ydata))
gains = matrix(1,nrow(ydata),ncol(ydata))


for (iter in 1:max_iter){
if (iter %% epoch == 0) { # epoch
cost = sum(apply(P * log((P+eps)/(Q+eps)),1,sum))
Expand All @@ -57,9 +53,8 @@ function(X, initial_config = NULL, k=2, initial_dims=30, perplexity=30, max_iter

}


sum_ydata = apply(ydata^2, 1, sum)
num = 1/(1 + sum_ydata + sweep(-2 * ydata %*% t(ydata),2, -t(sum_ydata)))
num = 1/(1 + sum_ydata + sweep(-2 * ydata %*% t(ydata),2, -t(sum_ydata)))
diag(num)=0
Q = num / sum(num)
if (any(is.nan(num))) message ('NaN in grad. descent')
Expand All @@ -68,21 +63,19 @@ function(X, initial_config = NULL, k=2, initial_dims=30, perplexity=30, max_iter
for (i in 1:n){
grads[i,] = apply(sweep(-ydata, 2, -ydata[i,]) * stiffnesses[,i],2,sum)
}

gains = (gains + .2) * abs(sign(grads) != sign(incs))
+ gains * .8 * abs(sign(grads) == sign(incs))

gains = ((gains + .2) * abs(sign(grads) != sign(incs)) +
gains * .8 * abs(sign(grads) == sign(incs)))

gains[gains < min_gain] = min_gain

incs = momentum * incs - epsilon * (gains * grads)
ydata = ydata + incs
ydata = sweep(ydata,2,apply(ydata,2,mean))
if (iter == mom_switch_iter) momentum = final_momentum

if (iter == 100 && is.null(initial_config)) P = P/4




}
ydata
}
Expand Down
2 changes: 1 addition & 1 deletion man/tsne-package.Rd
Expand Up @@ -21,7 +21,7 @@ LazyLoad: \tab yes\cr
}
\author{
Justin Donaldson
http://www.scwn.net
https://github.com/jdonaldson/rtsne
Maintainer: Justin Donaldson (jdonaldson@gmail.com)
}
\references{
Expand Down
10 changes: 6 additions & 4 deletions man/tsne.Rd
Expand Up @@ -9,7 +9,9 @@ The t-SNE method for dimensionality reduction
Provides a simple function interface for specifying t-SNE dimensionality reduction on R matrices or "dist" objects.
}
\usage{
tsne(X, initial_config = NULL, k = 2, initial_dims = 30, perplexity = 30, max_iter = 1000, min_cost = 0, epoch_callback = NULL, whiten = TRUE, epoch=100)
tsne(X, initial_config = NULL, k = 2, initial_dims = 30, perplexity = 30,
max_iter = 1000, min_cost = 0, epoch_callback = NULL, whiten = TRUE,
epoch=100)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
Expand All @@ -23,7 +25,7 @@ tsne(X, initial_config = NULL, k = 2, initial_dims = 30, perplexity = 30, max_it
the dimension of the resulting embedding.
}
\item{initial_dims}{
The number of dimensions to use in reduction method.
The number of dimensions to use in reduction method.
}
\item{perplexity}{
Perplexity parameter. (optimal number of neighbors)
Expand Down Expand Up @@ -55,9 +57,9 @@ tsne(X, initial_config = NULL, k = 2, initial_dims = 30, perplexity = 30, max_it

}
\references{
L.J.P. van der Maaten and G.E. Hinton. Visualizing High-Dimensional Data Using t-SNE. \emph{Journal of Machine Learning Research} 9 (Nov) : 2579-2605, 2008.
L.J.P. van der Maaten and G.E. Hinton. Visualizing High-Dimensional Data Using t-SNE. \emph{Journal of Machine Learning Research} 9 (Nov) : 2579-2605, 2008.

L.J.P. van der Maaten. Learning a Parametric Embedding by Preserving Local Structure. In \emph{Proceedings of the Twelfth International Conference on Artificial Intelligence and Statistics} (AISTATS), JMLR W&CP 5:384-391, 2009.
L.J.P. van der Maaten. Learning a Parametric Embedding by Preserving Local Structure. In \emph{Proceedings of the Twelfth International Conference on Artificial Intelligence and Statistics} (AISTATS), JMLR W&CP 5:384-391, 2009.
}
\author{
Justin Donaldson (jdonaldson@gmail.com)
Expand Down

0 comments on commit 0d67ff8

Please sign in to comment.