From 77e3625e0d4f0a48376ac111fcf297d6cfaf4552 Mon Sep 17 00:00:00 2001
From: JonasRieger
Date: Fri, 15 May 2020 19:18:02 +0200
Subject: [PATCH] help js

---
 R/jsTopics.R    | 11 +++++++----
 man/jsTopics.Rd | 11 +++++++----
 2 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/R/jsTopics.R b/R/jsTopics.R
index 849649d..961eaf3 100644
--- a/R/jsTopics.R
+++ b/R/jsTopics.R
@@ -7,10 +7,13 @@
 #' @details
 #' The Jensen-Shannon Similarity for two topics \eqn{\bm z_{i}} and
 #' \eqn{\bm z_{j}} is calculated by
-#' \deqn{\text{JS}(\bm z_{i}, \bm z_{j}) = 1 - \left( \text{KLD}(p_i, (p_i+p_j)/2) + \text{KLD}((p_j, (p_i+p_j)/2)) \right)/2 }
-#'
-#' with \eqn{V} is the vocabulary size and \eqn{n_k^{(v)}} is the count of
-#' assignments of the \eqn{v}-th word to the \eqn{k}-th topic.
+#' \deqn{JS(\bm z_{i}, \bm z_{j}) = 1 - \left( KLD(p_i, (p_i+p_j)/2) + KLD(p_j, (p_i+p_j)/2) \right)/2}
+#' \deqn{= 1 - \left( KLD(p_i, p_i+p_j) + KLD(p_j, p_i+p_j) \right)/2 - \log(2),}
+#' where \eqn{V} is the vocabulary size, \eqn{p_k = (p_k^{(1)}, ..., p_k^{(V)})},
+#' and \eqn{p_k^{(v)}} is the proportion of assignments of the
+#' \eqn{v}-th word to the \eqn{k}-th topic. KLD denotes the Kullback-Leibler
+#' Divergence, calculated by
+#' \deqn{KLD(\bm p_{k}, \bm p_{\Sigma}) = \sum_{v=1}^{V} p_k^{(v)} \log{\frac{p_k^{(v)}}{p_{\Sigma}^{(v)}}}.}
 #'
 #' @family TopicSimilarity functions
 #'
diff --git a/man/jsTopics.Rd b/man/jsTopics.Rd
index f0131a3..5a1daf1 100644
--- a/man/jsTopics.Rd
+++ b/man/jsTopics.Rd
@@ -50,10 +50,13 @@ Jensen-Shannon Divergence.
 \details{
 The Jensen-Shannon Similarity for two topics \eqn{\bm z_{i}} and
 \eqn{\bm z_{j}} is calculated by
-\deqn{\text{JS}(\bm z_{i}, \bm z_{j}) = 1 - \left( \text{KLD}(p_i, (p_i+p_j)/2) + \text{KLD}((p_j, (p_i+p_j)/2)) \right)/2 }
-
-with \eqn{V} is the vocabulary size and \eqn{n_k^{(v)}} is the count of
-assignments of the \eqn{v}-th word to the \eqn{k}-th topic.
+\deqn{JS(\bm z_{i}, \bm z_{j}) = 1 - \left( KLD(p_i, (p_i+p_j)/2) + KLD(p_j, (p_i+p_j)/2) \right)/2}
+\deqn{= 1 - \left( KLD(p_i, p_i+p_j) + KLD(p_j, p_i+p_j) \right)/2 - \log(2),}
+where \eqn{V} is the vocabulary size, \eqn{p_k = (p_k^{(1)}, ..., p_k^{(V)})},
+and \eqn{p_k^{(v)}} is the proportion of assignments of the
+\eqn{v}-th word to the \eqn{k}-th topic. KLD denotes the Kullback-Leibler
+Divergence, calculated by
+\deqn{KLD(\bm p_{k}, \bm p_{\Sigma}) = \sum_{v=1}^{V} p_k^{(v)} \log{\frac{p_k^{(v)}}{p_{\Sigma}^{(v)}}}.}
 }
 \examples{
 res = LDARep(docs = reuters_docs, vocab = reuters_vocab, n = 4, K = 10, num.iterations = 30)
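
Note (not part of the patch): the equivalence of the two forms in the updated help text can be checked with a few lines of plain base R. This is a standalone sketch of the documented formula, not code from the package; the helper names jsSim/jsSim2, the toy count vectors n_i/n_j, and the eps smoothing constant are illustrative assumptions, and the package's jsTopics() works on full count matrices from LDARep results and may treat zero counts differently.

# Minimal sketch (not package code): Jensen-Shannon Similarity of two topics
# from their word-assignment counts, following the formula documented above.
jsSim = function(n_i, n_j, eps = 1e-12) {
  p_i = n_i / sum(n_i)   # proportion of assignments per word, topic i
  p_j = n_j / sum(n_j)   # proportion of assignments per word, topic j
  m = (p_i + p_j) / 2    # mixture distribution
  kld = function(p, q) sum(p * log((p + eps) / (q + eps)))  # Kullback-Leibler Divergence
  1 - (kld(p_i, m) + kld(p_j, m)) / 2
}

# Second form from the help text: unnormalized mixture, shifted by log(2).
jsSim2 = function(n_i, n_j, eps = 1e-12) {
  p_i = n_i / sum(n_i)
  p_j = n_j / sum(n_j)
  kld = function(p, q) sum(p * log((p + eps) / (q + eps)))
  1 - (kld(p_i, p_i + p_j) + kld(p_j, p_i + p_j)) / 2 - log(2)
}

# Toy counts over a vocabulary of V = 5 words (illustrative values):
n_i = c(10, 5, 0, 3, 2)
n_j = c(8, 6, 1, 2, 3)
jsSim(n_i, n_j)
all.equal(jsSim(n_i, n_j), jsSim2(n_i, n_j))  # TRUE: both forms agree

The eps guard only avoids log(0) for words a topic never generates; with it, the 0 * log(0) terms evaluate to 0, consistent with the usual KLD convention.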