# coding=utf-8
"""Latent Dirichlet allocation using collapsed Gibbs sampling"""
from __future__ import absolute_import, division, unicode_literals # noqa
import logging
import sys
import numpy as np
import guidedlda._guidedlda
import guidedlda.utils
import random
logger = logging.getLogger('guidedlda')
PY2 = sys.version_info[0] == 2
if PY2:
    range = xrange


class GuidedLDA(object):
"""Guided Latent Dirichlet allocation using collapsed Gibbs sampling
Parameters
----------
n_topics : int
Number of topics
n_iter : int, default 2000
Number of sampling iterations
alpha : float, default 0.1
Dirichlet parameter for distribution over topics
eta : float, default 0.01
Dirichlet parameter for distribution over words
random_state : int or RandomState, optional
The generator used for the initial topics.
Attributes
----------
`components_` : array, shape = [n_topics, n_features]
Point estimate of the topic-word distributions (Phi in literature)
`topic_word_` :
Alias for `components_`
`word_topic_` : array, shape = [n_features, n_topics]
Point estimate of the word-topic distributions
`nzw_` : array, shape = [n_topics, n_features]
Matrix of counts recording topic-word assignments in final iteration.
`ndz_` : array, shape = [n_samples, n_topics]
Matrix of counts recording document-topic assignments in final iteration.
`doc_topic_` : array, shape = [n_samples, n_features]
Point estimate of the document-topic distributions (Theta in literature)
`nz_` : array, shape = [n_topics]
Array of topic assignment counts in final iteration.
Examples
--------
>>> import numpy
>>> X = numpy.array([[1,1], [2, 1], [3, 1], [4, 1], [5, 8], [6, 1]])
>>> import lda
>>> model = lda.LDA(n_topics=2, random_state=0, n_iter=100)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
LDA(alpha=...
>>> model.components_
array([[ 0.85714286, 0.14285714],
[ 0.45 , 0.55 ]])
>>> model.loglikelihood() #doctest: +ELLIPSIS
-40.395...
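
    A guided run seeds selected words towards chosen topics via `fit`.
    A sketch with illustrative word indices, skipped because the output
    depends on the corpus:

    >>> model = guidedlda.GuidedLDA(n_topics=2, n_iter=100, random_state=0)
    >>> model.fit(X, seed_topics={0: 0, 5: 1}, seed_confidence=0.3)  # doctest: +SKIP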

    References
    ----------
    Blei, David M., Andrew Y. Ng, and Michael I. Jordan. "Latent Dirichlet
    Allocation." Journal of Machine Learning Research 3 (2003): 993–1022.

    Griffiths, Thomas L., and Mark Steyvers. "Finding Scientific Topics."
    Proceedings of the National Academy of Sciences 101 (2004): 5228–5235.
    doi:10.1073/pnas.0307752101.

    Wallach, Hanna, David Mimno, and Andrew McCallum. "Rethinking LDA: Why
    Priors Matter." In Advances in Neural Information Processing Systems 22,
    edited by Y. Bengio, D. Schuurmans, J. Lafferty, C. K. I. Williams, and A.
    Culotta, 1973–1981, 2009.

    Buntine, Wray. "Estimating Likelihoods for Topic Models." In Advances in
    Machine Learning, First Asian Conference on Machine Learning (2009): 51–64.
    doi:10.1007/978-3-642-05224-8_6.
    """

    def __init__(self, n_topics, n_iter=2000, alpha=0.01, eta=0.01, random_state=None,
                 refresh=10):
        self.n_topics = n_topics
        self.n_iter = n_iter
        self.alpha = alpha
        self.eta = eta
        # if random_state is None, check_random_state(None) does nothing
        # other than return the current numpy RandomState
        self.random_state = random_state
        self.refresh = refresh

        if alpha <= 0 or eta <= 0:
            raise ValueError("alpha and eta must be greater than zero")

        # random numbers that are reused
        rng = guidedlda.utils.check_random_state(random_state)
        if random_state:
            random.seed(random_state)
        self._rands = rng.rand(1024**2 // 8)  # 1MiB of random variates

        # configure console logging if not already configured
        if len(logger.handlers) == 1 and isinstance(logger.handlers[0], logging.NullHandler):
            logging.basicConfig(level=logging.INFO)

    def fit(self, X, y=None, seed_topics=None, seed_confidence=0):
        """Fit the model with X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features. Sparse matrix allowed.
        seed_topics : dict, optional
            Mapping from word index to the topic it is seeded towards,
            e.g. {2: 0, 256: 0, 412: 1}.
        seed_confidence : float, default 0
            Probability with which a seeded word is initialized to its
            seed topic rather than a topic chosen round-robin.

        Returns
        -------
        self : object
            Returns the instance itself.
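
        Examples
        --------
        A sketch of a seeded fit; the word indices in `seed_topics` are
        illustrative, and the doctest is skipped because the output
        depends on the corpus:

        >>> model.fit(X, seed_topics={0: 0, 5: 1}, seed_confidence=0.3)  # doctest: +SKIP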
"""
self._fit(X, seed_topics=seed_topics, seed_confidence=seed_confidence)
return self

    def fit_transform(self, X, y=None, seed_topics=None, seed_confidence=0):
        """Apply dimensionality reduction on X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features. Sparse matrix allowed.
        seed_topics : dict, optional
            Mapping from word index to seed topic; see `fit`.
        seed_confidence : float, default 0
            See `fit`.

        Returns
        -------
        doc_topic : array-like, shape (n_samples, n_topics)
            Point estimate of the document-topic distributions
        """
        if isinstance(X, np.ndarray):
            # in case user passes a (non-sparse) array of shape (n_features,)
            # turn it into an array of shape (1, n_features)
            X = np.atleast_2d(X)
        self._fit(X, seed_topics=seed_topics, seed_confidence=seed_confidence)
        return self.doc_topic_

    def transform(self, X, max_iter=20, tol=1e-16):
        """Transform the data X according to the previously fitted model.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        max_iter : int, optional
            Maximum number of iterations in iterated-pseudocount estimation.
        tol : double, optional
            Tolerance value used in stopping condition.

        Returns
        -------
        doc_topic : array-like, shape (n_samples, n_topics)
            Point estimate of the document-topic distributions

        Note
        ----
        This uses the "iterated pseudo-counts" approach described
        in Wallach et al. (2009) and discussed in Buntine (2009).
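
        Examples
        --------
        A sketch of inference on held-out documents; `X_new` and `model`
        are assumed (same vocabulary as training), and the doctest is
        skipped because the output depends on the fitted model:

        >>> doc_topic = model.transform(X_new)  # doctest: +SKIP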
"""
if isinstance(X, np.ndarray):
# in case user passes a (non-sparse) array of shape (n_features,)
# turn it into an array of shape (1, n_features)
X = np.atleast_2d(X)
doc_topic = np.empty((X.shape[0], self.n_topics))
WS, DS = guidedlda.utils.matrix_to_lists(X)
# TODO: this loop is parallelizable
for d in np.unique(DS):
doc_topic[d] = self._transform_single(WS[DS == d], max_iter, tol)
return doc_topic

    def _transform_single(self, doc, max_iter, tol):
        """Transform a single document according to the previously fitted model.

        Parameters
        ----------
        doc : 1D numpy array of integers
            Each element represents a word in the document
        max_iter : int
            Maximum number of iterations in iterated-pseudocount estimation.
        tol : double
            Tolerance value used in stopping condition.

        Returns
        -------
        doc_topic : 1D numpy array of length n_topics
            Point estimate of the topic distributions for document

        Note
        ----
        See Note in `transform` documentation.
        """
        PZS = np.zeros((len(doc), self.n_topics))
        for iteration in range(max_iter + 1):  # +1 is for initialization
            # P(z=k | w) is proportional to phi[k, w] times the pseudo-count
            # of topic k in the rest of the document, plus the alpha prior
            PZS_new = self.components_[:, doc].T
            PZS_new *= (PZS.sum(axis=0) - PZS + self.alpha)
            PZS_new /= PZS_new.sum(axis=1)[:, np.newaxis]  # vector to single column matrix
            delta_naive = np.abs(PZS_new - PZS).sum()
            logger.debug('transform iter {}, delta {}'.format(iteration, delta_naive))
            PZS = PZS_new
            if delta_naive < tol:
                break
        theta_doc = PZS.sum(axis=0) / PZS.sum()
        assert theta_doc.shape == (self.n_topics,)
        return theta_doc

    def _fit(self, X, seed_topics, seed_confidence):
        """Fit the model to the data X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features. Sparse matrix allowed.
        """
        if seed_topics is None:
            seed_topics = {}
        random_state = guidedlda.utils.check_random_state(self.random_state)
        rands = self._rands.copy()
        self._initialize(X, seed_topics, seed_confidence)
        for it in range(self.n_iter):
            # FIXME: using numpy.roll with a random shift might be faster
            random_state.shuffle(rands)
            if it % self.refresh == 0:
                ll = self.loglikelihood()
                logger.info("<{}> log likelihood: {:.0f}".format(it, ll))
                # keep track of loglikelihoods for monitoring convergence
                self.loglikelihoods_.append(ll)
            self._sample_topics(rands)
        ll = self.loglikelihood()
        logger.info("<{}> log likelihood: {:.0f}".format(self.n_iter - 1, ll))
        # note: in-place division (/=) on the integer count arrays would
        # fail or truncate, so cast to float before normalizing
        self.components_ = (self.nzw_ + self.eta).astype(float)
        self.components_ /= np.sum(self.components_, axis=1)[:, np.newaxis]
        self.topic_word_ = self.components_
        self.word_topic_ = (self.nzw_ + self.eta).astype(float)
        self.word_topic_ /= np.sum(self.word_topic_, axis=0)[np.newaxis, :]
        self.word_topic_ = self.word_topic_.T
        self.doc_topic_ = (self.ndz_ + self.alpha).astype(float)
        self.doc_topic_ /= np.sum(self.doc_topic_, axis=1)[:, np.newaxis]
        # delete attributes no longer needed after fitting to save memory and reduce clutter
        del self.WS
        del self.DS
        del self.ZS
        return self
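
    # Note on the point estimates computed at the end of `_fit` above
    # (matching the normalizations there):
    #
    #     Phi[z, w]   = (nzw_[z, w] + eta)   / sum_w' (nzw_[z, w'] + eta)
    #     Theta[d, z] = (ndz_[d, z] + alpha) / sum_z' (ndz_[d, z'] + alpha)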

    def _initialize(self, X, seed_topics, seed_confidence):
        """Initialize the document-topic distribution, topic-word
        distribution, and topic assignment counts.

        Parameters
        ----------
        seed_topics : dict
            Mapping from word index to seed topic,
            e.g. {2: 0, 256: 0, 412: 1, 113: 1}
        """
        D, W = X.shape
        N = int(X.sum())
        n_topics = self.n_topics
        n_iter = self.n_iter
        logger.info("n_documents: {}".format(D))
        logger.info("vocab_size: {}".format(W))
        logger.info("n_words: {}".format(N))
        logger.info("n_topics: {}".format(n_topics))
        logger.info("n_iter: {}".format(n_iter))

        self.beta = 0.1
        self.nzw_ = nzw_ = np.zeros((n_topics, W), dtype=np.intc)  # + self.beta
        self.ndz_ = ndz_ = np.zeros((D, n_topics), dtype=np.intc)  # + self.alpha
        self.nz_ = nz_ = np.zeros(n_topics, dtype=np.intc)  # + W * self.beta

        self.WS, self.DS = WS, DS = guidedlda.utils.matrix_to_lists(X)
        self.ZS = ZS = np.empty_like(self.WS, dtype=np.intc)
        np.testing.assert_equal(N, len(WS))

        # seeded initialization: with probability seed_confidence, assign a
        # seeded word to its seed topic; otherwise fall back to round-robin
        for i in range(N):
            w, d = WS[i], DS[i]
            if w not in seed_topics:
                continue
            if random.random() < seed_confidence:
                z_new = seed_topics[w]
            else:
                z_new = i % n_topics
            ZS[i] = z_new
            ndz_[d, z_new] += 1
            nzw_[z_new, w] += 1
            nz_[z_new] += 1

        # non-seeded initialization: assign the remaining words round-robin
        for i in range(N):
            w, d = WS[i], DS[i]
            if w in seed_topics:
                continue
            z_new = i % n_topics
            ZS[i] = z_new
            ndz_[d, z_new] += 1
            nzw_[z_new, w] += 1
            nz_[z_new] += 1
        self.loglikelihoods_ = []
        self.nzw_ = nzw_.astype(np.intc)
        self.ndz_ = ndz_.astype(np.intc)
        self.nz_ = nz_.astype(np.intc)
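
    # Building `seed_topics` typically starts from a vocabulary -> index
    # map. A sketch (the vocabulary and seed word lists here are
    # illustrative assumptions):
    #
    #     word2id = dict((w, i) for i, w in enumerate(vocab))
    #     seed_topic_list = [['game', 'team'], ['market', 'price']]
    #     seed_topics = {}
    #     for t_id, words in enumerate(seed_topic_list):
    #         for word in words:
    #             seed_topics[word2id[word]] = t_id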

    def purge_extra_matrices(self):
        """Clear out the word-topic, document-topic, and internal count
        matrices to save memory.

        Once this method has been called, don't call `fit_transform` again;
        just use the model for predictions.
        """
        del self.topic_word_
        del self.word_topic_
        del self.doc_topic_
        del self.nzw_
        del self.ndz_
        del self.nz_

    def loglikelihood(self):
        """Calculate complete log likelihood, log p(w,z).

        Formula used is log p(w,z) = log p(w|z) + log p(z)
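
        Concretely, for symmetric priors alpha and eta with D documents,
        K topics and W vocabulary words, the standard collapsed form is
        (a sketch of the usual Dirichlet-multinomial identity; the exact
        computation lives in the Cython `_loglikelihood` routine):

            log p(w|z) = K log G(W eta) - K W log G(eta)
                + sum_z [ sum_w log G(nzw[z, w] + eta) - log G(nz[z] + W eta) ]
            log p(z) = D log G(K alpha) - D K log G(alpha)
                + sum_d [ sum_z log G(ndz[d, z] + alpha) - log G(nd[d] + K alpha) ]

        where G is the Gamma function.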
"""
nzw, ndz, nz = self.nzw_, self.ndz_, self.nz_
alpha = self.alpha
eta = self.eta
nd = np.sum(ndz, axis=1).astype(np.intc)
return guidedlda._guidedlda._loglikelihood(nzw, ndz, nz, nd, alpha, eta)

    def _sample_topics(self, rands):
        """Samples all topic assignments. Called once per iteration."""
        n_topics, vocab_size = self.nzw_.shape
        # the Cython sampler expects the symmetric priors as full vectors
        alpha = np.repeat(self.alpha, n_topics).astype(np.float64)
        eta = np.repeat(self.eta, vocab_size).astype(np.float64)
        guidedlda._guidedlda._sample_topics(self.WS, self.DS, self.ZS,
                                            self.nzw_, self.ndz_, self.nz_,
                                            alpha, eta, rands)
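

if __name__ == '__main__':
    # Minimal, self-contained usage sketch (illustrative only): a tiny
    # synthetic document-term matrix with hand-picked seed words. The
    # vocabulary, counts and seed choices below are assumptions for
    # demonstration, not shipped test data.
    vocab = ['apple', 'banana', 'cpu', 'gpu', 'fruit', 'chip']
    X_demo = np.array([
        [3, 2, 0, 0, 2, 0],   # a "fruit" document
        [0, 0, 4, 1, 0, 2],   # a "hardware" document
        [2, 3, 0, 0, 1, 0],
        [0, 0, 2, 3, 0, 1],
    ], dtype=np.intc)
    # seed 'apple'/'banana' towards topic 0 and 'cpu'/'gpu' towards topic 1
    seeds = {vocab.index('apple'): 0, vocab.index('banana'): 0,
             vocab.index('cpu'): 1, vocab.index('gpu'): 1}
    demo_model = GuidedLDA(n_topics=2, n_iter=100, random_state=7)
    demo_model.fit(X_demo, seed_topics=seeds, seed_confidence=0.75)
    for k, dist in enumerate(demo_model.topic_word_):
        top_words = [vocab[i] for i in np.argsort(dist)[::-1][:3]]
        print('Topic {}: {}'.format(k, ' '.join(top_words)))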