-
Notifications
You must be signed in to change notification settings - Fork 0
/
Training Categorize Model.py
82 lines (61 loc) · 2.02 KB
/
Training Categorize Model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import nltk
nltk.download('punkt')
from textblob.classifiers import NaiveBayesClassifier
import csv
import pandas as pd
import random
from itertools import groupby
from operator import itemgetter
import pickle
# ---------------------------------------------------------------------------
# One-vs-rest Naive Bayes training.
#
# For each category in CATEGORIES, read "<category>.csv", then train one
# binary ("pos" vs "neg") TextBlob classifier that separates that category's
# texts from all other categories' texts.  The resulting classifiers are
# pickled as a list for later multi-class voting.
# ---------------------------------------------------------------------------

CATEGORIES = ["tech", "education", "politic", "sport", "business"]
TRAIN_FRACTION = 0.9  # 90/10 train/test split within every category


def _load_labeled_texts(categories):
    """Return a list of (text, category) pairs read from <category>.csv files.

    NOTE(review): assumes the text lives in the second CSV column — confirm
    against the files produced by the scraper.
    """
    labeled = []
    for category in categories:
        frame = pd.read_csv("{}.csv".format(category), delimiter=',')
        texts = [row[1] for row in frame.values]
        print(len(texts))
        labeled.extend((text, category) for text in texts)
    return labeled


def _group_by_category(labeled):
    """Group (text, category) pairs into a {category: [text, ...]} dict."""
    grouped = {}
    for text, category in labeled:
        grouped.setdefault(category, []).append(text)
    return grouped


def _shuffled_split(texts, fraction):
    """Shuffle a copy of *texts* and split it into (train, test) lists."""
    pool = list(texts)  # copy, so the caller's list is never mutated
    random.shuffle(pool)
    cut = int(len(pool) * fraction)
    return pool[:cut], pool[cut:]


def _train_one_vs_rest(grouped, base_category):
    """Train a binary classifier for *base_category* vs. everything else.

    Texts of *base_category* are labeled "pos"; texts of every other
    category are labeled "neg".  Returns a tuple
    (classifier, accuracy, train_size, test_size).
    """
    train = []
    test = []
    for category, texts in grouped.items():
        label = "pos" if category == base_category else "neg"
        train_part, test_part = _shuffled_split(texts, TRAIN_FRACTION)
        train.extend((text, label) for text in train_part)
        test.extend((text, label) for text in test_part)
    classifier = NaiveBayesClassifier(train)
    return classifier, classifier.accuracy(test), len(train), len(test)


def main():
    """Train one classifier per category and pickle the list of models."""
    grouped = _group_by_category(_load_labeled_texts(CATEGORIES))
    classifiers = []
    # sorted() keeps the classifier order of the original sort+groupby
    # implementation, so existing consumers of the pickle stay compatible.
    for category in sorted(grouped):
        classifier, accuracy, n_train, n_test = _train_one_vs_rest(grouped, category)
        print("class :{} train:{} test:{} acc:{}".format(category, n_train, n_test, accuracy))
        classifiers.append(classifier)
    # Context manager guarantees the pickle file is flushed and closed
    # (the original left the handle open and shadowed the built-ins
    # `object` and `file`).
    with open('tweet-categorize-multiclass-array.obj', 'wb') as model_file:
        pickle.dump(classifiers, model_file)


if __name__ == "__main__":
    main()