-
Notifications
You must be signed in to change notification settings - Fork 2
/
palavrasgrandes.py
56 lines (32 loc) · 920 Bytes
/
palavrasgrandes.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Extract the long words (>= MIN_LENGTH chars) from 'catatau.txt', order
them with Unicode-collation-aware sorting (pyuca, so accented Portuguese
words sort correctly), and write them space-separated to 'Neotatau.txt'."""
import re
import codecs
import nltk
from nltk.corpus import stopwords
from pyuca import Collator

MIN_LENGTH = 13  # keep only words at least this many characters long

c = Collator("allkeys.txt")
arq = "catatau.txt"
# Read the whole text as a Unicode string from the UTF-8 bytes in the file;
# 'with' guarantees the handle is closed (the original leaked it).
with codecs.open(arq, "r", "utf-8") as fileObj:
    catatau = fileObj.read()

# Sentence tokenizer for Portuguese (pre-trained punkt model).
stok = nltk.data.load('tokenizers/punkt/portuguese.pickle')
catalinhas = stok.tokenize(catatau)
tokens = nltk.word_tokenize(catatau)
#catacorpus=nltk.Text(nltk.word_tokenize(catatau))

# Deduplicate, then sort once using the pyuca collation key so accents are
# ordered correctly. (Sorting the raw token list before set() — as the
# original did — was dead work: set() discards order.)
glossario = sorted(set(tokens), key=c.sort_key)

# Keep only the long words. The original reversed the sorted list and
# prepended each word, which nets out to ascending collation order joined
# by spaces with one trailing space — reproduced here without the O(n^2)
# string concatenation.
longas = [w for w in glossario if len(w) >= MIN_LENGTH]
a = " ".join(longas) + " " if longas else ""
# print(x) with a single argument behaves identically under Python 2 and 3.
print(a)

############# grava arquivo (write the output file)
# Use a context manager and avoid shadowing the builtin 'file'.
with codecs.open("Neotatau.txt", "w", "utf-8") as saida:
    saida.write(a)