/
introduction-to-tokenizers.html
527 lines (494 loc) · 37.9 KB
/
introduction-to-tokenizers.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta name="generator" content="pandoc" />
<meta http-equiv="X-UA-Compatible" content="IE=EDGE" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta name="author" content="Lincoln Mullen" />
<title>Introduction to the tokenizers Package</title>
<script>// Pandoc 2.9 adds attributes on both header and div. We remove the former (to
// be compatible with the behavior of Pandoc < 2.8).
document.addEventListener('DOMContentLoaded', function(e) {
var hs = document.querySelectorAll("div.section[class*='level'] > :first-child");
var i, h, a;
for (i = 0; i < hs.length; i++) {
h = hs[i];
if (!/^h[1-6]$/i.test(h.tagName)) continue; // it should be a header h1-h6
a = h.attributes;
while (a.length > 0) h.removeAttribute(a[0].name);
}
});
</script>
<style type="text/css">code{white-space: pre;}</style>
<style type="text/css" data-origin="pandoc">
/* Pandoc-generated syntax-highlighting CSS. The data-origin="pandoc"
   attribute on this <style> element is how the script further down in
   <head> locates this sheet before rewriting its div.sourceCode rule. */
pre > code.sourceCode { white-space: pre; position: relative; }
pre > code.sourceCode > span { display: inline-block; line-height: 1.25; }
pre > code.sourceCode > span:empty { height: 1.2em; }
code.sourceCode > span { color: inherit; text-decoration: inherit; }
div.sourceCode { margin: 1em 0; }
pre.sourceCode { margin: 0; }
@media screen {
div.sourceCode { overflow: auto; }
}
@media print {
pre > code.sourceCode { white-space: pre-wrap; }
pre > code.sourceCode > span { text-indent: -5em; padding-left: 5em; }
}
/* Line-numbered listings: each line's ::before renders a counter. */
pre.numberSource code
{ counter-reset: source-line 0; }
pre.numberSource code > span
{ position: relative; left: -4em; counter-increment: source-line; }
pre.numberSource code > span > a:first-child::before
{ content: counter(source-line);
position: relative; left: -1em; text-align: right; vertical-align: baseline;
border: none; display: inline-block;
-webkit-touch-callout: none; -webkit-user-select: none;
-khtml-user-select: none; -moz-user-select: none;
-ms-user-select: none; user-select: none;
padding: 0 4px; width: 4em;
color: #aaaaaa;
}
pre.numberSource { margin-left: 3em; border-left: 1px solid #aaaaaa; padding-left: 4px; }
/* NOTE(review): intentionally empty. Pandoc emits theme color/background
   declarations here when a highlight theme sets them; the script below
   relocates such declarations onto pre.sourceCode. Confirm before removing. */
div.sourceCode
{ }
@media screen {
pre > code.sourceCode > span > a:first-child::before { text-decoration: underline; }
}
/* Highlighting token classes (pandoc skylighting). */
code span.al { color: #ff0000; font-weight: bold; } /* Alert */
code span.an { color: #60a0b0; font-weight: bold; font-style: italic; } /* Annotation */
code span.at { color: #7d9029; } /* Attribute */
code span.bn { color: #40a070; } /* BaseN */
code span.bu { } /* BuiltIn */
code span.cf { color: #007020; font-weight: bold; } /* ControlFlow */
code span.ch { color: #4070a0; } /* Char */
code span.cn { color: #880000; } /* Constant */
code span.co { color: #60a0b0; font-style: italic; } /* Comment */
code span.cv { color: #60a0b0; font-weight: bold; font-style: italic; } /* CommentVar */
code span.do { color: #ba2121; font-style: italic; } /* Documentation */
code span.dt { color: #902000; } /* DataType */
code span.dv { color: #40a070; } /* DecVal */
code span.er { color: #ff0000; font-weight: bold; } /* Error */
code span.ex { } /* Extension */
code span.fl { color: #40a070; } /* Float */
code span.fu { color: #06287e; } /* Function */
code span.im { } /* Import */
code span.in { color: #60a0b0; font-weight: bold; font-style: italic; } /* Information */
code span.kw { color: #007020; font-weight: bold; } /* Keyword */
code span.op { color: #666666; } /* Operator */
code span.ot { color: #007020; } /* Other */
code span.pp { color: #bc7a00; } /* Preprocessor */
code span.sc { color: #4070a0; } /* SpecialChar */
code span.ss { color: #bb6688; } /* SpecialString */
code span.st { color: #4070a0; } /* String */
code span.va { color: #19177c; } /* Variable */
code span.vs { color: #4070a0; } /* VerbatimString */
code span.wa { color: #60a0b0; font-weight: bold; font-style: italic; } /* Warning */
</style>
<script>
// apply pandoc div.sourceCode style to pre.sourceCode instead
(function() {
var sheets = document.styleSheets;
for (var i = 0; i < sheets.length; i++) {
if (sheets[i].ownerNode.dataset["origin"] !== "pandoc") continue;
try { var rules = sheets[i].cssRules; } catch (e) { continue; }
for (var j = 0; j < rules.length; j++) {
var rule = rules[j];
// check if there is a div.sourceCode rule
if (rule.type !== rule.STYLE_RULE || rule.selectorText !== "div.sourceCode") continue;
var style = rule.style.cssText;
// check if color or background-color is set
if (rule.style.color === '' && rule.style.backgroundColor === '') continue;
// replace div.sourceCode by a pre.sourceCode rule
sheets[i].deleteRule(j);
sheets[i].insertRule('pre.sourceCode{' + style + '}', j);
}
}
})();
</script>
<style type="text/css">
/* Base page layout and typography. */
body {
background-color: #fff;
margin: 1em auto;
max-width: 700px;
overflow: visible;
padding-left: 2em;
padding-right: 2em;
font-family: "Open Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;
font-size: 14px;
line-height: 1.35;
}
/* Floating table-of-contents box. */
#TOC {
clear: both;
margin: 0 0 10px 10px;
padding: 4px;
width: 400px;
border: 1px solid #CCCCCC;
border-radius: 5px;
background-color: #f6f6f6;
font-size: 13px;
line-height: 1.3;
}
#TOC .toctitle {
font-weight: bold;
font-size: 15px;
margin-left: 5px;
}
#TOC ul {
padding-left: 40px;
margin-left: -1.5em;
margin-top: 5px;
margin-bottom: 5px;
}
#TOC ul ul {
margin-left: -2em;
}
#TOC li {
line-height: 16px;
}
/* Data tables. */
table {
margin: 1em auto;
border-width: 1px;
border-color: #DDDDDD;
border-style: outset;
border-collapse: collapse;
}
table th {
border-width: 2px;
padding: 5px;
border-style: inset;
}
table td {
border-width: 1px;
border-style: inset;
line-height: 18px;
padding: 5px 5px;
}
table, table th, table td {
border-left-style: none;
border-right-style: none;
}
table thead, table tr.even {
background-color: #f7f7f7;
}
p {
margin: 0.5em 0;
}
blockquote {
background-color: #f6f6f6;
padding: 0.25em 0.75em;
}
hr {
/* `border: none` resets every side; only the top rule is drawn.
   (A dead `border-style: solid;` that it overrode has been removed.) */
border: none;
border-top: 1px solid #777;
margin: 28px 0;
}
dl {
margin-left: 0;
}
dl dd {
margin-bottom: 13px;
margin-left: 13px;
}
dl dt {
font-weight: bold;
}
ul {
margin-top: 0;
}
ul li {
list-style: circle outside;
}
ul ul {
margin-bottom: 0;
}
/* Code listings. */
pre, code {
background-color: #f7f7f7;
border-radius: 3px;
color: #333;
white-space: pre-wrap;
}
pre {
border-radius: 3px;
margin: 5px 0px 10px 0px;
padding: 10px;
}
pre:not([class]) {
background-color: #f7f7f7;
}
code {
font-family: Consolas, Monaco, 'Courier New', monospace;
font-size: 85%;
}
p > code, li > code {
padding: 2px 0px;
}
div.figure {
text-align: center;
}
img {
background-color: #FFFFFF;
padding: 2px;
/* Single border declaration; an earlier duplicate (`1px solid #DDDDDD`)
   that this one fully overrode has been removed. */
border: 1px solid #CCCCCC;
border-radius: 3px;
margin: 0 5px;
}
/* Headings. */
h1 {
margin-top: 0;
font-size: 35px;
line-height: 40px;
}
h2 {
border-bottom: 4px solid #f7f7f7;
padding-top: 10px;
padding-bottom: 2px;
font-size: 145%;
}
h3 {
border-bottom: 2px solid #f7f7f7;
padding-top: 10px;
font-size: 120%;
}
h4 {
border-bottom: 1px solid #f7f7f7;
margin-left: 8px;
font-size: 105%;
}
h5, h6 {
border-bottom: 1px solid #ccc;
font-size: 105%;
}
/* Links. */
a {
color: #0033dd;
text-decoration: none;
}
a:hover {
color: #6666ff; }
a:visited {
color: #800080; }
a:visited:hover {
color: #BB00BB; }
a[href^="http:"] {
text-decoration: underline; }
a[href^="https:"] {
text-decoration: underline; }
/* Token colors for highlighted code (code > span.* selectors). */
code > span.kw { color: #555; font-weight: bold; }
code > span.dt { color: #902000; }
code > span.dv { color: #40a070; }
code > span.bn { color: #d14; }
code > span.fl { color: #d14; }
code > span.ch { color: #d14; }
code > span.st { color: #d14; }
code > span.co { color: #888888; font-style: italic; }
code > span.ot { color: #007020; }
code > span.al { color: #ff0000; font-weight: bold; }
code > span.fu { color: #900; font-weight: bold; }
code > span.er { color: #a61717; background-color: #e3d2d2; }
</style>
</head>
<body>
<h1 class="title toc-ignore">Introduction to the tokenizers Package</h1>
<h4 class="author">Lincoln Mullen</h4>
<div id="package-overview" class="section level2">
<h2>Package overview</h2>
<p>In natural language processing, tokenization is the process of breaking human-readable text into machine-readable components. The most obvious way to tokenize a text is to split the text into words. But there are many other ways to tokenize a text, the most useful of which are provided by this package.</p>
<p>The tokenizers in this package have a consistent interface. They all take either a character vector of any length, or a list where each element is a character vector of length one. The idea is that each element comprises a text. Then each function returns a list with the same length as the input vector, where each element in the list contains the tokens generated by the function. If the input character vector or list is named, then the names are preserved, so that the names can serve as identifiers.</p>
<p>Using the following sample text, the rest of this vignette demonstrates the different kinds of tokenizers in this package.</p>
<div class="sourceCode" id="cb1"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb1-1"><a href="#cb1-1"></a><span class="kw">library</span>(tokenizers)</span>
<span id="cb1-2"><a href="#cb1-2"></a><span class="kw">options</span>(<span class="dt">max.print =</span> <span class="dv">25</span>)</span>
<span id="cb1-3"><a href="#cb1-3"></a></span>
<span id="cb1-4"><a href="#cb1-4"></a>james <-<span class="st"> </span><span class="kw">paste0</span>(</span>
<span id="cb1-5"><a href="#cb1-5"></a> <span class="st">"The question thus becomes a verbal one</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-6"><a href="#cb1-6"></a> <span class="st">"again; and our knowledge of all these early stages of thought and feeling</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-7"><a href="#cb1-7"></a> <span class="st">"is in any case so conjectural and imperfect that farther discussion would</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-8"><a href="#cb1-8"></a> <span class="st">"not be worth while.</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-9"><a href="#cb1-9"></a> <span class="st">"</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-10"><a href="#cb1-10"></a> <span class="st">"Religion, therefore, as I now ask you arbitrarily to take it, shall mean</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-11"><a href="#cb1-11"></a> <span class="st">"for us _the feelings, acts, and experiences of individual men in their</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-12"><a href="#cb1-12"></a> <span class="st">"solitude, so far as they apprehend themselves to stand in relation to</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-13"><a href="#cb1-13"></a> <span class="st">"whatever they may consider the divine_. Since the relation may be either</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-14"><a href="#cb1-14"></a> <span class="st">"moral, physical, or ritual, it is evident that out of religion in the</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-15"><a href="#cb1-15"></a> <span class="st">"sense in which we take it, theologies, philosophies, and ecclesiastical</span><span class="ch">\n</span><span class="st">"</span>,</span>
<span id="cb1-16"><a href="#cb1-16"></a> <span class="st">"organizations may secondarily grow.</span><span class="ch">\n</span><span class="st">"</span></span>
<span id="cb1-17"><a href="#cb1-17"></a>)</span></code></pre></div>
</div>
<div id="character-and-character-shingle-tokenizers" class="section level2">
<h2>Character and character-shingle tokenizers</h2>
<p>The character tokenizer splits texts into individual characters.</p>
<div class="sourceCode" id="cb2"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb2-1"><a href="#cb2-1"></a><span class="kw">tokenize_characters</span>(james)[[<span class="dv">1</span>]] </span>
<span id="cb2-2"><a href="#cb2-2"></a><span class="co">#> [1] "t" "h" "e" "q" "u" "e" "s" "t" "i" "o" "n" "t" "h" "u" "s" "b" "e" "c" "o"</span></span>
<span id="cb2-3"><a href="#cb2-3"></a><span class="co">#> [20] "m" "e" "s" "a" "v" "e"</span></span>
<span id="cb2-4"><a href="#cb2-4"></a><span class="co">#> [ reached getOption("max.print") -- omitted 517 entries ]</span></span></code></pre></div>
<p>You can also tokenize into character-based shingles.</p>
<div class="sourceCode" id="cb3"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb3-1"><a href="#cb3-1"></a><span class="kw">tokenize_character_shingles</span>(james, <span class="dt">n =</span> <span class="dv">3</span>, <span class="dt">n_min =</span> <span class="dv">3</span>, </span>
<span id="cb3-2"><a href="#cb3-2"></a> <span class="dt">strip_non_alphanum =</span> <span class="ot">FALSE</span>)[[<span class="dv">1</span>]][<span class="dv">1</span><span class="op">:</span><span class="dv">20</span>]</span>
<span id="cb3-3"><a href="#cb3-3"></a><span class="co">#> [1] "the" "he " "e q" " qu" "que" "ues" "est" "sti" "tio" "ion" "on " "n t"</span></span>
<span id="cb3-4"><a href="#cb3-4"></a><span class="co">#> [13] " th" "thu" "hus" "us " "s b" " be" "bec" "eco"</span></span></code></pre></div>
</div>
<div id="word-and-word-stem-tokenizers" class="section level2">
<h2>Word and word-stem tokenizers</h2>
<p>The word tokenizer splits texts into words.</p>
<div class="sourceCode" id="cb4"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb4-1"><a href="#cb4-1"></a><span class="kw">tokenize_words</span>(james)</span>
<span id="cb4-2"><a href="#cb4-2"></a><span class="co">#> [[1]]</span></span>
<span id="cb4-3"><a href="#cb4-3"></a><span class="co">#> [1] "the" "question" "thus" "becomes" "a" "verbal" </span></span>
<span id="cb4-4"><a href="#cb4-4"></a><span class="co">#> [7] "one" "again" "and" "our" "knowledge" "of" </span></span>
<span id="cb4-5"><a href="#cb4-5"></a><span class="co">#> [13] "all" "these" "early" "stages" "of" "thought" </span></span>
<span id="cb4-6"><a href="#cb4-6"></a><span class="co">#> [19] "and" "feeling" "is" "in" "any" "case" </span></span>
<span id="cb4-7"><a href="#cb4-7"></a><span class="co">#> [25] "so" </span></span>
<span id="cb4-8"><a href="#cb4-8"></a><span class="co">#> [ reached getOption("max.print") -- omitted 87 entries ]</span></span></code></pre></div>
<p>Word stemming is provided by the <a href="https://cran.r-project.org/package=SnowballC">SnowballC</a> package.</p>
<div class="sourceCode" id="cb5"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb5-1"><a href="#cb5-1"></a><span class="kw">tokenize_word_stems</span>(james)</span>
<span id="cb5-2"><a href="#cb5-2"></a><span class="co">#> [[1]]</span></span>
<span id="cb5-3"><a href="#cb5-3"></a><span class="co">#> [1] "the" "question" "thus" "becom" "a" "verbal" </span></span>
<span id="cb5-4"><a href="#cb5-4"></a><span class="co">#> [7] "one" "again" "and" "our" "knowledg" "of" </span></span>
<span id="cb5-5"><a href="#cb5-5"></a><span class="co">#> [13] "all" "these" "earli" "stage" "of" "thought" </span></span>
<span id="cb5-6"><a href="#cb5-6"></a><span class="co">#> [19] "and" "feel" "is" "in" "ani" "case" </span></span>
<span id="cb5-7"><a href="#cb5-7"></a><span class="co">#> [25] "so" </span></span>
<span id="cb5-8"><a href="#cb5-8"></a><span class="co">#> [ reached getOption("max.print") -- omitted 87 entries ]</span></span></code></pre></div>
<p>You can also provide a vector of stopwords which will be omitted. The <a href="https://github.com/quanteda/stopwords">stopwords package</a>, which contains stopwords for many languages from several sources, is recommended. This argument also works with the n-gram and skip n-gram tokenizers.</p>
<div class="sourceCode" id="cb6"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb6-1"><a href="#cb6-1"></a><span class="kw">library</span>(stopwords)</span>
<span id="cb6-2"><a href="#cb6-2"></a><span class="kw">tokenize_words</span>(james, <span class="dt">stopwords =</span> stopwords<span class="op">::</span><span class="kw">stopwords</span>(<span class="st">"en"</span>))</span>
<span id="cb6-3"><a href="#cb6-3"></a><span class="co">#> [[1]]</span></span>
<span id="cb6-4"><a href="#cb6-4"></a><span class="co">#> [1] "question" "thus" "becomes" "verbal" "one" </span></span>
<span id="cb6-5"><a href="#cb6-5"></a><span class="co">#> [6] "knowledge" "early" "stages" "thought" "feeling" </span></span>
<span id="cb6-6"><a href="#cb6-6"></a><span class="co">#> [11] "case" "conjectural" "imperfect" "farther" "discussion" </span></span>
<span id="cb6-7"><a href="#cb6-7"></a><span class="co">#> [16] "worth" "religion" "therefore" "now" "ask" </span></span>
<span id="cb6-8"><a href="#cb6-8"></a><span class="co">#> [21] "arbitrarily" "take" "shall" "mean" "us" </span></span>
<span id="cb6-9"><a href="#cb6-9"></a><span class="co">#> [ reached getOption("max.print") -- omitted 33 entries ]</span></span></code></pre></div>
<p>An alternative word tokenizer often used in NLP that preserves punctuation and separates common English contractions is the Penn Treebank tokenizer.</p>
<div class="sourceCode" id="cb7"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb7-1"><a href="#cb7-1"></a><span class="kw">tokenize_ptb</span>(james)</span>
<span id="cb7-2"><a href="#cb7-2"></a><span class="co">#> [[1]]</span></span>
<span id="cb7-3"><a href="#cb7-3"></a><span class="co">#> [1] "The" "question" "thus" "becomes" "a" "verbal" </span></span>
<span id="cb7-4"><a href="#cb7-4"></a><span class="co">#> [7] "one" "again" ";" "and" "our" "knowledge"</span></span>
<span id="cb7-5"><a href="#cb7-5"></a><span class="co">#> [13] "of" "all" "these" "early" "stages" "of" </span></span>
<span id="cb7-6"><a href="#cb7-6"></a><span class="co">#> [19] "thought" "and" "feeling" "is" "in" "any" </span></span>
<span id="cb7-7"><a href="#cb7-7"></a><span class="co">#> [25] "case" </span></span>
<span id="cb7-8"><a href="#cb7-8"></a><span class="co">#> [ reached getOption("max.print") -- omitted 101 entries ]</span></span></code></pre></div>
</div>
<div id="n-gram-and-skip-n-gram-tokenizers" class="section level2">
<h2>N-gram and skip n-gram tokenizers</h2>
<p>An n-gram is a contiguous sequence of words containing at least <code>n_min</code> words and at most <code>n</code> words. This function will generate all such combinations of n-grams, omitting stopwords if desired.</p>
<div class="sourceCode" id="cb8"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb8-1"><a href="#cb8-1"></a><span class="kw">tokenize_ngrams</span>(james, <span class="dt">n =</span> <span class="dv">5</span>, <span class="dt">n_min =</span> <span class="dv">2</span>,</span>
<span id="cb8-2"><a href="#cb8-2"></a> <span class="dt">stopwords =</span> stopwords<span class="op">::</span><span class="kw">stopwords</span>(<span class="st">"en"</span>))</span>
<span id="cb8-3"><a href="#cb8-3"></a><span class="co">#> [[1]]</span></span>
<span id="cb8-4"><a href="#cb8-4"></a><span class="co">#> [1] "question thus" </span></span>
<span id="cb8-5"><a href="#cb8-5"></a><span class="co">#> [2] "question thus becomes" </span></span>
<span id="cb8-6"><a href="#cb8-6"></a><span class="co">#> [3] "question thus becomes verbal" </span></span>
<span id="cb8-7"><a href="#cb8-7"></a><span class="co">#> [4] "question thus becomes verbal one" </span></span>
<span id="cb8-8"><a href="#cb8-8"></a><span class="co">#> [5] "thus becomes" </span></span>
<span id="cb8-9"><a href="#cb8-9"></a><span class="co">#> [6] "thus becomes verbal" </span></span>
<span id="cb8-10"><a href="#cb8-10"></a><span class="co">#> [7] "thus becomes verbal one" </span></span>
<span id="cb8-11"><a href="#cb8-11"></a><span class="co">#> [8] "thus becomes verbal one knowledge" </span></span>
<span id="cb8-12"><a href="#cb8-12"></a><span class="co">#> [9] "becomes verbal" </span></span>
<span id="cb8-13"><a href="#cb8-13"></a><span class="co">#> [10] "becomes verbal one" </span></span>
<span id="cb8-14"><a href="#cb8-14"></a><span class="co">#> [11] "becomes verbal one knowledge" </span></span>
<span id="cb8-15"><a href="#cb8-15"></a><span class="co">#> [12] "becomes verbal one knowledge early" </span></span>
<span id="cb8-16"><a href="#cb8-16"></a><span class="co">#> [13] "verbal one" </span></span>
<span id="cb8-17"><a href="#cb8-17"></a><span class="co">#> [14] "verbal one knowledge" </span></span>
<span id="cb8-18"><a href="#cb8-18"></a><span class="co">#> [15] "verbal one knowledge early" </span></span>
<span id="cb8-19"><a href="#cb8-19"></a><span class="co">#> [16] "verbal one knowledge early stages" </span></span>
<span id="cb8-20"><a href="#cb8-20"></a><span class="co">#> [17] "one knowledge" </span></span>
<span id="cb8-21"><a href="#cb8-21"></a><span class="co">#> [18] "one knowledge early" </span></span>
<span id="cb8-22"><a href="#cb8-22"></a><span class="co">#> [19] "one knowledge early stages" </span></span>
<span id="cb8-23"><a href="#cb8-23"></a><span class="co">#> [20] "one knowledge early stages thought" </span></span>
<span id="cb8-24"><a href="#cb8-24"></a><span class="co">#> [21] "knowledge early" </span></span>
<span id="cb8-25"><a href="#cb8-25"></a><span class="co">#> [22] "knowledge early stages" </span></span>
<span id="cb8-26"><a href="#cb8-26"></a><span class="co">#> [23] "knowledge early stages thought" </span></span>
<span id="cb8-27"><a href="#cb8-27"></a><span class="co">#> [24] "knowledge early stages thought feeling"</span></span>
<span id="cb8-28"><a href="#cb8-28"></a><span class="co">#> [25] "early stages" </span></span>
<span id="cb8-29"><a href="#cb8-29"></a><span class="co">#> [ reached getOption("max.print") -- omitted 197 entries ]</span></span></code></pre></div>
<p>A skip n-gram is like an n-gram in that it takes the <code>n</code> and <code>n_min</code> parameters. But rather than returning contiguous sequences of words, it will also return sequences of n-grams skipping words with gaps between <code>0</code> and the value of <code>k</code>. This function generates all such sequences, again omitting stopwords if desired. Note that the number of tokens returned can be very large.</p>
<div class="sourceCode" id="cb9"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb9-1"><a href="#cb9-1"></a><span class="kw">tokenize_skip_ngrams</span>(james, <span class="dt">n =</span> <span class="dv">5</span>, <span class="dt">n_min =</span> <span class="dv">2</span>, <span class="dt">k =</span> <span class="dv">2</span>,</span>
<span id="cb9-2"><a href="#cb9-2"></a> <span class="dt">stopwords =</span> stopwords<span class="op">::</span><span class="kw">stopwords</span>(<span class="st">"en"</span>))</span>
<span id="cb9-3"><a href="#cb9-3"></a><span class="co">#> [[1]]</span></span>
<span id="cb9-4"><a href="#cb9-4"></a><span class="co">#> [1] "question thus" "question becomes" </span></span>
<span id="cb9-5"><a href="#cb9-5"></a><span class="co">#> [3] "question verbal" "question thus becomes" </span></span>
<span id="cb9-6"><a href="#cb9-6"></a><span class="co">#> [5] "question thus verbal" "question thus one" </span></span>
<span id="cb9-7"><a href="#cb9-7"></a><span class="co">#> [7] "question becomes verbal" "question becomes one" </span></span>
<span id="cb9-8"><a href="#cb9-8"></a><span class="co">#> [9] "question becomes knowledge" "question verbal one" </span></span>
<span id="cb9-9"><a href="#cb9-9"></a><span class="co">#> [11] "question verbal knowledge" "question verbal early" </span></span>
<span id="cb9-10"><a href="#cb9-10"></a><span class="co">#> [13] "question thus becomes verbal" "question thus becomes one" </span></span>
<span id="cb9-11"><a href="#cb9-11"></a><span class="co">#> [15] "question thus becomes knowledge" "question thus verbal one" </span></span>
<span id="cb9-12"><a href="#cb9-12"></a><span class="co">#> [17] "question thus verbal knowledge" "question thus verbal early" </span></span>
<span id="cb9-13"><a href="#cb9-13"></a><span class="co">#> [19] "question thus one knowledge" "question thus one early" </span></span>
<span id="cb9-14"><a href="#cb9-14"></a><span class="co">#> [21] "question thus one stages" "question becomes verbal one" </span></span>
<span id="cb9-15"><a href="#cb9-15"></a><span class="co">#> [23] "question becomes verbal knowledge" "question becomes verbal early" </span></span>
<span id="cb9-16"><a href="#cb9-16"></a><span class="co">#> [25] "question becomes one knowledge" </span></span>
<span id="cb9-17"><a href="#cb9-17"></a><span class="co">#> [ reached getOption("max.print") -- omitted 6083 entries ]</span></span></code></pre></div>
</div>
<div id="tweet-tokenizer" class="section level2">
<h2>Tweet tokenizer</h2>
<p>Tokenizing tweets requires special attention, since usernames (<code>@whoever</code>) and hashtags (<code>#hashtag</code>) use special characters that might otherwise be stripped away.</p>
<div class="sourceCode" id="cb10"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb10-1"><a href="#cb10-1"></a><span class="kw">tokenize_tweets</span>(<span class="st">"Welcome, @user, to the tokenizers package. #rstats #forever"</span>)</span>
<span id="cb10-2"><a href="#cb10-2"></a><span class="co">#> [[1]]</span></span>
<span id="cb10-3"><a href="#cb10-3"></a><span class="co">#> [1] "welcome" "@user" "to" "the" "tokenizers"</span></span>
<span id="cb10-4"><a href="#cb10-4"></a><span class="co">#> [6] "package" "#rstats" "#forever"</span></span></code></pre></div>
</div>
<div id="sentence-and-paragraph-tokenizers" class="section level2">
<h2>Sentence and paragraph tokenizers</h2>
<p>Sometimes it is desirable to split texts into sentences or paragraphs prior to tokenizing into other forms.</p>
<div class="sourceCode" id="cb11"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb11-1"><a href="#cb11-1"></a><span class="kw">tokenize_sentences</span>(james) </span></code></pre></div>
<pre><code>#> [[1]]
#> [1] "The question thus becomes a verbal one again; and our knowledge of all these early stages of thought and feeling is in any case so conjectural and imperfect that farther discussion would not be worth while."
#> [2] "Religion, therefore, as I now ask you arbitrarily to take it, shall mean for us _the feelings, acts, and experiences of individual men in their solitude, so far as they apprehend themselves to stand in relation to whatever they may consider the divine_."
#> [3] "Since the relation may be either moral, physical, or ritual, it is evident that out of religion in the sense in which we take it, theologies, philosophies, and ecclesiastical organizations may secondarily grow."</code></pre>
<div class="sourceCode" id="cb13"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb13-1"><a href="#cb13-1"></a><span class="kw">tokenize_paragraphs</span>(james)</span></code></pre></div>
<pre><code>#> [[1]]
#> [1] "The question thus becomes a verbal one again; and our knowledge of all these early stages of thought and feeling is in any case so conjectural and imperfect that farther discussion would not be worth while."
#> [2] "Religion, therefore, as I now ask you arbitrarily to take it, shall mean for us _the feelings, acts, and experiences of individual men in their solitude, so far as they apprehend themselves to stand in relation to whatever they may consider the divine_. Since the relation may be either moral, physical, or ritual, it is evident that out of religion in the sense in which we take it, theologies, philosophies, and ecclesiastical organizations may secondarily grow. "</code></pre>
</div>
<div id="text-chunking" class="section level2">
<h2>Text chunking</h2>
<p>When one has a very long document, sometimes it is desirable to split the document into smaller chunks, each with the same length. This function chunks a document and gives each of the chunks an ID to show their order. These chunks can then be further tokenized.</p>
<div class="sourceCode" id="cb15"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb15-1"><a href="#cb15-1"></a>chunks <-<span class="st"> </span><span class="kw">chunk_text</span>(mobydick, <span class="dt">chunk_size =</span> <span class="dv">100</span>, <span class="dt">doc_id =</span> <span class="st">"mobydick"</span>)</span>
<span id="cb15-2"><a href="#cb15-2"></a><span class="kw">length</span>(chunks)</span>
<span id="cb15-3"><a href="#cb15-3"></a><span class="co">#> [1] 2195</span></span>
<span id="cb15-4"><a href="#cb15-4"></a>chunks[<span class="dv">5</span><span class="op">:</span><span class="dv">6</span>]</span>
<span id="cb15-5"><a href="#cb15-5"></a><span class="co">#> $`mobydick-0005`</span></span>
<span id="cb15-6"><a href="#cb15-6"></a><span class="co">#> [1] "of a poor devil of a sub sub appears to have gone through the long vaticans and street stalls of the earth picking up whatever random allusions to whales he could anyways find in any book whatsoever sacred or profane therefore you must not in every case at least take the higgledy piggledy whale statements however authentic in these extracts for veritable gospel cetology far from it as touching the ancient authors generally as well as the poets here appearing these extracts are solely valuable or entertaining as affording a glancing bird's eye view of what has been promiscuously said"</span></span>
<span id="cb15-7"><a href="#cb15-7"></a><span class="co">#> </span></span>
<span id="cb15-8"><a href="#cb15-8"></a><span class="co">#> $`mobydick-0006`</span></span>
<span id="cb15-9"><a href="#cb15-9"></a><span class="co">#> [1] "thought fancied and sung of leviathan by many nations and generations including our own so fare thee well poor devil of a sub sub whose commentator i am thou belongest to that hopeless sallow tribe which no wine of this world will ever warm and for whom even pale sherry would be too rosy strong but with whom one sometimes loves to sit and feel poor devilish too and grow convivial upon tears and say to them bluntly with full eyes and empty glasses and in not altogether unpleasant sadness give it up sub subs for by how much the"</span></span>
<span id="cb15-10"><a href="#cb15-10"></a><span class="kw">tokenize_words</span>(chunks[<span class="dv">5</span><span class="op">:</span><span class="dv">6</span>])</span>
<span id="cb15-11"><a href="#cb15-11"></a><span class="co">#> $`mobydick-0005`</span></span>
<span id="cb15-12"><a href="#cb15-12"></a><span class="co">#> [1] "of" "a" "poor" "devil" "of" "a" </span></span>
<span id="cb15-13"><a href="#cb15-13"></a><span class="co">#> [7] "sub" "sub" "appears" "to" "have" "gone" </span></span>
<span id="cb15-14"><a href="#cb15-14"></a><span class="co">#> [13] "through" "the" "long" "vaticans" "and" "street" </span></span>
<span id="cb15-15"><a href="#cb15-15"></a><span class="co">#> [19] "stalls" "of" "the" "earth" "picking" "up" </span></span>
<span id="cb15-16"><a href="#cb15-16"></a><span class="co">#> [25] "whatever"</span></span>
<span id="cb15-17"><a href="#cb15-17"></a><span class="co">#> [ reached getOption("max.print") -- omitted 75 entries ]</span></span>
<span id="cb15-18"><a href="#cb15-18"></a><span class="co">#> </span></span>
<span id="cb15-19"><a href="#cb15-19"></a><span class="co">#> $`mobydick-0006`</span></span>
<span id="cb15-20"><a href="#cb15-20"></a><span class="co">#> [1] "thought" "fancied" "and" "sung" "of" </span></span>
<span id="cb15-21"><a href="#cb15-21"></a><span class="co">#> [6] "leviathan" "by" "many" "nations" "and" </span></span>
<span id="cb15-22"><a href="#cb15-22"></a><span class="co">#> [11] "generations" "including" "our" "own" "so" </span></span>
<span id="cb15-23"><a href="#cb15-23"></a><span class="co">#> [16] "fare" "thee" "well" "poor" "devil" </span></span>
<span id="cb15-24"><a href="#cb15-24"></a><span class="co">#> [21] "of" "a" "sub" "sub" "whose" </span></span>
<span id="cb15-25"><a href="#cb15-25"></a><span class="co">#> [ reached getOption("max.print") -- omitted 75 entries ]</span></span></code></pre></div>
</div>
<div id="counting-words-characters-sentences" class="section level2">
<h2>Counting words, characters, sentences</h2>
<p>The package also offers functions for counting words, characters, and sentences in a format which works nicely with the rest of the functions.</p>
<div class="sourceCode" id="cb16"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb16-1"><a href="#cb16-1"></a><span class="kw">count_words</span>(mobydick)</span>
<span id="cb16-2"><a href="#cb16-2"></a><span class="co">#> mobydick </span></span>
<span id="cb16-3"><a href="#cb16-3"></a><span class="co">#> 219415</span></span>
<span id="cb16-4"><a href="#cb16-4"></a><span class="kw">count_characters</span>(mobydick)</span>
<span id="cb16-5"><a href="#cb16-5"></a><span class="co">#> mobydick </span></span>
<span id="cb16-6"><a href="#cb16-6"></a><span class="co">#> 1235185</span></span>
<span id="cb16-7"><a href="#cb16-7"></a><span class="kw">count_sentences</span>(mobydick)</span>
<span id="cb16-8"><a href="#cb16-8"></a><span class="co">#> mobydick </span></span>
<span id="cb16-9"><a href="#cb16-9"></a><span class="co">#> 29076</span></span></code></pre></div>
</div>
<!-- code folding -->
<!-- dynamically load mathjax for compatibility with self-contained -->
<script>
(function () {
var script = document.createElement("script");
script.type = "text/javascript";
script.src = "https://mathjax.rstudio.com/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML";
document.getElementsByTagName("head")[0].appendChild(script);
})();
</script>
</body>
</html>