k_means_kdd.py
"""
Script for running K-Means clustering on the KDD-Cup dataset.
"""
import argparse
import glob
import logging
import os
import string
import sys
import dask.array as da
import dask.dataframe as dd
import pandas as pd
import requests
import sklearn.cluster as sk
from distributed import Client
from sklearn.datasets import get_data_home
import s3fs
from dask_ml.cluster import KMeans
from dask_ml.utils import _timer
from .base import make_parser
logger = logging.getLogger(__name__)
try:
import coloredlogs
except ImportError:
pass
else:
coloredlogs.install()
URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz" # noqa


def parse_args(args=None):
    parser = make_parser()
    parser.add_argument(
        "--path",
        default="s3://dask-data/kddcup/kdd.parq/",
        help="Path for the data (potentially an S3 key)",
    )
    return parser.parse_args(args)


def download():
    """Download the raw KDD-Cup data into the scikit-learn data home, if not already present."""
    p = os.path.join(get_data_home(), "kddcup.data.gz")
    if os.path.exists(p):
        return p
    r = requests.get(URL, stream=True)
    with open(p, "wb") as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    return p


def split(p):
    """Convert the raw CSV to a partitioned parquet dataset."""
    output = os.path.join(get_data_home(), "kddcup.parq")
    if not os.path.exists(output):
        dtype = {1: "category", 2: "category", 3: "category", 41: "category"}
        df = pd.read_csv(p, header=None, dtype=dtype)
        # Encode the categorical columns as integer codes.
        cat_cols = df.select_dtypes(include=["category"]).columns
        df[cat_cols] = df[cat_cols].apply(lambda col: col.cat.codes)
        df.columns = list(string.ascii_letters[: len(df.columns)])
        ddf = dd.from_pandas(df, npartitions=16)
        ddf.to_parquet(output)
    return output


def upload(p, fs):
    """Upload the local parquet files to S3."""
    fs.mkdir("dask-data/kddcup/kdd.parq")
    for file in glob.glob(os.path.join(p, "*")):
        fs.put(file, "dask-data/kddcup/kdd.parq/" + file.split("/")[-1])
        print(file)
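

# Note: calling ``.values`` on a dask DataFrame gives a dask array whose chunk
# sizes along the row axis are unknown. ``as_known`` below rebuilds the array
# from its delayed blocks using precomputed partition lengths, so downstream
# code sees a fully known shape.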


def as_known(X, lengths):
    blocks = X.to_delayed().flatten()
    P = X.shape[1]
    arrays = [
        da.from_delayed(x, dtype=X.dtype, shape=(length, P))
        for x, length in zip(blocks, lengths)
    ]
    return da.concatenate(arrays, axis=0)


def load(path):
    logger.info("Reading data")
    df = dd.read_parquet(path)
    df = df.persist()
    logger.info("Data in memory")

    # Get known chunks
    lengths = df.map_partitions(len).compute()
    X = as_known(df.values, lengths)
    X = X.persist()
    return X


def fit(data, use_scikit_learn=False):
    logger.info("Starting to cluster")
    # Cluster
    n_clusters = 8
    oversampling_factor = 2
    if use_scikit_learn:
        km = sk.KMeans(n_clusters=n_clusters, random_state=0)
    else:
        km = KMeans(
            n_clusters=n_clusters,
            oversampling_factor=oversampling_factor,
            random_state=0,
        )
    logger.info(
        "Starting n_clusters=%2d, oversampling_factor=%2d",
        n_clusters,
        oversampling_factor,
    )
    with _timer("km.fit", _logger=logger):
        km.fit(data)


def main(args=None):
    args = parse_args(args)
    logger.info("Checking local data")
    local = split(download())

    if args.scheduler_address:
        logger.info("Connecting to %s", args.scheduler_address)
        client = Client(args.scheduler_address)
        logger.info(client.scheduler_info())

    if not args.local:
        logger.info("Using distributed mode")
        fs = s3fs.S3FileSystem()
        if fs.exists("dask-data/kddcup/kdd.parq/"):
            logger.info("Using cached dataset")
        else:
            logger.info("Uploading to cloud storage")
            upload(local, fs)
        path = "s3://dask-data/kddcup/kdd.parq/"
    else:
        logger.info("Using local mode")
        path = local

    data = load(path)
    fit(data, args.scikit_learn)


if __name__ == "__main__":
    sys.exit(main(None))
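

# Example invocation (a sketch only -- the package name and flag spellings are
# assumptions, since the CLI is defined by make_parser in the sibling ``base``
# module; the code above only relies on the ``scheduler_address``, ``local``,
# and ``scikit_learn`` attributes of the parsed args):
#
#     python -m benchmarks.k_means_kdd --scheduler-address tcp://localhost:8786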