forked from stratosphereips/AIP
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathgenerate_historical_blocklists.py
executable file
·136 lines (117 loc) · 5.34 KB
/
generate_historical_blocklists.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
#!/usr/bin/env python
"""
Tool to generate historical blocklists
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Joaquin Bogado <joaquinbogado@duck.com>"]
__contact__ = "stratosphere@aic.fel.cvut.cz"
__copyright__ = "Copyright 2022, Stratosphere Laboratory."
__credits__ = ["Joaquín Bogado"]
__deprecated__ = False
__license__ = "GPLv3"
__maintainer__ = "Joaquin Bogado"
__version__ = "1.0.0"
import logging
import pandas as pd
import time
from aip.data.access import data_path, project_dir
from aip.models.alpha import Alpha
from aip.models.prioritize import New
from aip.models.prioritize import Consistent
from aip.models.prioritize import RandomForest
from aip.models.prioritize import Knowledgebase
from aip.models.pareto import Pareto
from datetime import date, timedelta
from joblib import Parallel, delayed
from pathlib import Path
from os import makedirs, path, scandir
#project_dir = Path(__file__).resolve().parents[1]
# First day of the historical range to regenerate blocklists for.
start = '2020-07-05'
# Regenerate up to and including today.
end = str(date.today())
if __name__ == '__main__':
    # Log lines: timestamp - logger name - level - message
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    #logging.basicConfig(level=logging.INFO, format=log_fmt)
    # DEBUG verbosity is deliberately on for the historical regeneration run.
    logging.basicConfig(level=logging.DEBUG, format=log_fmt)
    # not used in this stub but often useful for finding various files
    # load up the .env entries as environment variables
    #load_dotenv(find_dotenv())
def run_model_alpha(day):
    """Run the Alpha model for *day* and write its blocklist to disk.

    Output: <project_dir>/data/output/alpha_model/alpha_<day>.csv.gz,
    a gzipped CSV with a single 'attacker' column.

    :param day: date-like object the model should generate a blocklist for.
    """
    output_dir = path.join(project_dir, 'data', 'output', 'alpha_model')
    # exist_ok=True is race-free: several parallel workers may create the
    # directory at the same time (the old exists()/makedirs() pair was not).
    makedirs(output_dir, exist_ok=True)
    alpha = Alpha()
    blocklist = alpha.run(for_date=day)
    blocklist = blocklist.rename(columns={'ip': 'attacker'})
    # Publish only the 'attacker' column, dropping any score/metadata columns.
    pd.DataFrame(blocklist, columns=['attacker']).to_csv(
        path.join(output_dir, f'alpha_{str(day)}.csv.gz'),
        index=False, compression='gzip')
def run_model_alpha7(day):
    """Run the Alpha model with a 7-day window for *day* and write its blocklist.

    Output: <project_dir>/data/output/alpha7_model/alpha7_<day>.csv.gz,
    a gzipped CSV with a single 'attacker' column.

    :param day: date-like object the model should generate a blocklist for.
    """
    output_dir = path.join(project_dir, 'data', 'output', 'alpha7_model')
    # exist_ok=True avoids the check-then-create race between parallel workers.
    makedirs(output_dir, exist_ok=True)
    alpha = Alpha(7)  # 7-day variant of the Alpha model
    blocklist = alpha.run(for_date=day)
    blocklist = blocklist.rename(columns={'ip': 'attacker'})
    # Publish only the 'attacker' column, dropping any score/metadata columns.
    pd.DataFrame(blocklist, columns=['attacker']).to_csv(
        path.join(output_dir, f'alpha7_{str(day)}.csv.gz'),
        index=False, compression='gzip')
def run_model_pn(day):
    """Run the Prioritize New model for *day* and write its blocklist.

    Output: <data_path>/output/prioritize_new/prioritize-new_<day>.csv.gz.
    Unlike the alpha/pareto writers, the full model output (all columns)
    is written, not just the attacker column.

    :param day: date-like object the model should generate a blocklist for.
    """
    output_dir = path.join(data_path, 'output', 'prioritize_new')
    # exist_ok=True avoids the check-then-create race between parallel workers.
    makedirs(output_dir, exist_ok=True)
    pn = New()
    blocklist = pn.run(for_date=day)
    blocklist.to_csv(
        path.join(output_dir, f'prioritize-new_{str(day)}.csv.gz'),
        index=False, compression='gzip')
def run_model_pc(day):
    """Run the Prioritize Consistent model for *day* and write its blocklist.

    Output: <data_path>/output/prioritize_consistent/
    prioritize-consistent_<day>.csv.gz, with the full model output.

    :param day: date-like object the model should generate a blocklist for.
    """
    output_dir = path.join(data_path, 'output', 'prioritize_consistent')
    # exist_ok=True avoids the check-then-create race between parallel workers.
    makedirs(output_dir, exist_ok=True)
    pc = Consistent()
    blocklist = pc.run(for_date=day)
    blocklist.to_csv(
        path.join(output_dir, f'prioritize-consistent_{str(day)}.csv.gz'),
        index=False, compression='gzip')
def run_model_rf(day):
    """Run the RandomForest model for *day* and write its blocklist.

    Output: <data_path>/output/random_forest/
    rf_v1_30estimators_<day>.csv.gz, with the full model output.
    The filename encodes the model version and estimator count.

    :param day: date-like object the model should generate a blocklist for.
    """
    output_dir = path.join(data_path, 'output', 'random_forest')
    # exist_ok=True avoids the check-then-create race between parallel workers.
    makedirs(output_dir, exist_ok=True)
    rf = RandomForest()
    blocklist = rf.run(for_date=day)
    blocklist.to_csv(
        path.join(output_dir, f'rf_v1_30estimators_{str(day)}.csv.gz'),
        index=False, compression='gzip')
def run_model_pareto(day):
    """Run the Pareto model for *day* and write its blocklist to disk.

    Output: <project_dir>/data/output/pareto_model/pareto_<day>.csv.gz,
    a gzipped CSV with a single 'attacker' column.

    :param day: date-like object the model should generate a blocklist for.
    """
    output_dir = path.join(project_dir, 'data', 'output', 'pareto_model')
    # exist_ok=True is race-free: this is the one model currently enabled in
    # run_models(), so 16 parallel workers hit this path simultaneously and
    # the old exists()/makedirs() pair could raise FileExistsError.
    makedirs(output_dir, exist_ok=True)
    pareto = Pareto()
    blocklist = pareto.run(for_date=day)
    blocklist = blocklist.rename(columns={'ip': 'attacker'})
    # Publish only the 'attacker' column, dropping any score/metadata columns.
    pd.DataFrame(blocklist, columns=['attacker']).to_csv(
        path.join(output_dir, f'pareto_{str(day)}.csv.gz'),
        index=False, compression='gzip')
def run_models(day):
    """Generate every currently-enabled blocklist for a single day.

    Only the Pareto model is enabled at the moment; add the other
    run_model_* functions (alpha, pn, pc, rf) to the tuple below to
    regenerate their historical blocklists as well.

    :param day: date-like object passed through to each model runner.
    """
    print(day)
    enabled_models = (run_model_pareto,)
    for model in enabled_models:
        model(day)
# One entry per calendar day from `start` through `end` (inclusive).
dates = [x.date() for x in (pd.date_range(start=start, end=end))]
st_time = time.time()
#print(f'Creating knowledgebase from {str(dates[0])} to the present')
#k = Knowledgebase()
# Need to build the knowledge outside the parallel loop
# build() is not a reentrant function
#k.build(start=dates[0], end=dates[-1])
#print(f'Knowledge created in {(time.time() - st_time)/60} minutes.')
# Serial fallback, kept for debugging:
# for day in dates:
# run_models(day)
# NOTE(review): st_time is immediately re-assigned below, so the first
# assignment above is dead code while the knowledgebase step is commented out.
st_time = time.time()
print('Running models')
# Fan out one task per day across 16 worker processes; each worker writes
# its own output file, so tasks are independent.
Parallel(n_jobs=16, backend='multiprocessing')(delayed(run_models)(day) for day in dates)
print(f'Models run after {(time.time() - st_time)/60} minutes.')