-
Notifications
You must be signed in to change notification settings - Fork 192
/
dedup.py
483 lines (389 loc) · 14.9 KB
/
dedup.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
from __future__ import absolute_import, print_function, division
import operator
from petl.compat import text_type
from petl.util.base import Table, asindices, itervalues
from petl.transform.sorts import sort
def duplicates(table, key=None, presorted=False, buffersize=None, tempdir=None,
               cache=True):
    """
    Select rows with duplicate values under a given key, or wholly
    duplicate rows where no key is given.

    The `key` argument may be a single field name or index, or a
    list/tuple of field names (a compound key).

    If `presorted` is True, it is assumed that the data are already
    sorted by the given key, and the `buffersize`, `tempdir` and `cache`
    arguments are ignored.  Otherwise, the data are sorted first; see
    the discussion of the `buffersize`, `tempdir` and `cache` arguments
    under the :func:`petl.transform.sorts.sort` function.

    See also :func:`petl.transform.dedup.unique` and
    :func:`petl.transform.dedup.distinct`.

    """

    sort_opts = dict(presorted=presorted, buffersize=buffersize,
                     tempdir=tempdir, cache=cache)
    return DuplicatesView(table, key=key, **sort_opts)


Table.duplicates = duplicates
class DuplicatesView(Table):
    """Lazy view over the rows of `source` whose key value is shared by
    at least one other row."""

    def __init__(self, source, key=None, presorted=False, buffersize=None,
                 tempdir=None, cache=True):
        # sort first unless the caller guarantees the data are ordered
        if presorted:
            self.source = source
        else:
            self.source = sort(source, key, buffersize=buffersize,
                               tempdir=tempdir, cache=cache)
        self.key = key

    def __iter__(self):
        return iterduplicates(self.source, self.key)
def iterduplicates(source, key):
    # N.B., `source` is assumed to be sorted by `key` already
    rows = iter(source)
    hdr = next(rows)
    yield tuple(hdr)

    # resolve the field selection into positional indices
    if key is None:
        indices = range(len(hdr))
    else:
        indices = asindices(hdr, key)
    # N.B., itemgetter may raise an exception on short rows, depending
    # on the field selection
    getkey = operator.itemgetter(*indices)

    prev = None
    emitted_prev = False
    for row in rows:
        if prev is None:
            prev = row
            continue
        if getkey(prev) == getkey(row):
            # duplicate key group: emit the first member once, then
            # every subsequent member
            if not emitted_prev:
                yield tuple(prev)
                emitted_prev = True
            yield tuple(row)
        else:
            # new key group
            emitted_prev = False
        prev = row
def unique(table, key=None, presorted=False, buffersize=None, tempdir=None,
           cache=True):
    """
    Select rows whose value under the given key occurs exactly once, or
    rows that are unique in full if no key is given.

    If `presorted` is True, it is assumed that the data are already
    sorted by the given key, and the `buffersize`, `tempdir` and `cache`
    arguments are ignored.  Otherwise, the data are sorted first; see
    the discussion of the `buffersize`, `tempdir` and `cache` arguments
    under the :func:`petl.transform.sorts.sort` function.

    See also :func:`petl.transform.dedup.duplicates` and
    :func:`petl.transform.dedup.distinct`.

    """

    sort_opts = dict(presorted=presorted, buffersize=buffersize,
                     tempdir=tempdir, cache=cache)
    return UniqueView(table, key=key, **sort_opts)


Table.unique = unique
class UniqueView(Table):
    """Lazy view over the rows of `source` whose key value occurs
    exactly once."""

    def __init__(self, source, key=None, presorted=False, buffersize=None,
                 tempdir=None, cache=True):
        # sort first unless the caller guarantees the data are ordered
        if presorted:
            self.source = source
        else:
            self.source = sort(source, key, buffersize=buffersize,
                               tempdir=tempdir, cache=cache)
        self.key = key

    def __iter__(self):
        return iterunique(self.source, self.key)
def iterunique(source, key):
    """
    Yield the header, then only those rows whose key value differs from
    both neighbouring rows.  `source` is assumed to be sorted by `key`
    already; if `key` is None the whole row is compared.
    """
    it = iter(source)
    hdr = next(it)
    yield tuple(hdr)
    # convert field selection into field indices
    if key is None:
        indices = range(len(hdr))
    else:
        indices = asindices(hdr, key)
    # N.B., this may raise an exception on short rows, depending on
    # the field selection
    getkey = operator.itemgetter(*indices)
    try:
        prev = next(it)
    except StopIteration:
        # no data rows at all
        return
    prev_key = getkey(prev)
    # True while `prev`'s key differs from the row before it
    prev_comp_ne = True
    for curr in it:
        curr_key = getkey(curr)
        curr_comp_ne = (curr_key != prev_key)
        if prev_comp_ne and curr_comp_ne:
            # prev differs from both neighbours, so its key is unique
            yield tuple(prev)
        prev = curr
        prev_key = curr_key
        prev_comp_ne = curr_comp_ne
    # last row is unique iff it differed from its predecessor; yield it
    # as a tuple for consistency with every other yielded row (bug fix:
    # previously the raw row object, e.g. a list, leaked through here)
    if prev_comp_ne:
        yield tuple(prev)
def conflicts(table, key, missing=None, include=None, exclude=None,
              presorted=False, buffersize=None, tempdir=None, cache=True):
    """
    Select rows that share a key value but differ in some other field.

    Values equal to `missing` (default ``None``) are never treated as
    conflicting.  One or more fields can be left out of the comparison
    via the `exclude` keyword argument; alternatively the comparison
    can be restricted to particular fields via `include`.  If both are
    given, `exclude` takes precedence.

    If `presorted` is True, it is assumed that the data are already
    sorted by the given key, and the `buffersize`, `tempdir` and `cache`
    arguments are ignored.  Otherwise, the data are sorted first; see
    the discussion of the `buffersize`, `tempdir` and `cache` arguments
    under the :func:`petl.transform.sorts.sort` function.

    """

    sort_opts = dict(presorted=presorted, buffersize=buffersize,
                     tempdir=tempdir, cache=cache)
    return ConflictsView(table, key, missing=missing, exclude=exclude,
                         include=include, **sort_opts)


Table.conflicts = conflicts
class ConflictsView(Table):
    """Lazy view over the rows of `source` that share a key value but
    disagree in some other (non-missing) field."""

    def __init__(self, source, key, missing=None, exclude=None, include=None,
                 presorted=False, buffersize=None, tempdir=None, cache=True):
        # sort first unless the caller guarantees the data are ordered
        if presorted:
            self.source = source
        else:
            self.source = sort(source, key, buffersize=buffersize,
                               tempdir=tempdir, cache=cache)
        self.key = key
        self.missing = missing
        self.exclude = exclude
        self.include = include

    def __iter__(self):
        return iterconflicts(self.source, self.key, self.missing,
                             self.exclude, self.include)
def iterconflicts(source, key, missing, exclude, include):
    # coerce scalar field selections to tuples
    if exclude and not isinstance(exclude, (list, tuple)):
        exclude = (exclude,)
    if include and not isinstance(include, (list, tuple)):
        include = (include,)
    # exclude overrides include
    if include and exclude:
        include = None

    it = iter(source)
    hdr = next(it)
    flds = list(map(text_type, hdr))
    yield tuple(hdr)

    # convert the key selection into positional indices; N.B.,
    # itemgetter may raise an exception on short rows, depending on the
    # field selection
    getkey = operator.itemgetter(*asindices(hdr, key))

    def considered(f):
        # should field `f` participate in conflict detection?
        if exclude:
            return f not in exclude
        if include:
            return f in include
        return True

    def conflicting(a, b):
        # two same-key rows conflict if any considered, non-missing
        # pair of corresponding values disagrees
        for x, y, f in zip(a, b, flds):
            if considered(f) and missing not in (x, y) and x != y:
                return True
        return False

    prev = None
    emitted_prev = False
    for row in it:
        if prev is None:
            prev = row
            continue
        if getkey(prev) == getkey(row):
            if conflicting(prev, row):
                # emit the first member of the group once, then every
                # subsequent conflicting member
                if not emitted_prev:
                    yield tuple(prev)
                    emitted_prev = True
                yield tuple(row)
        else:
            # new key group
            emitted_prev = False
        prev = row
def distinct(table, key=None, count=None, presorted=False, buffersize=None,
             tempdir=None, cache=True):
    """
    Return only distinct rows in the table.

    If the `count` argument is not None, it is used as the name for an
    additional field whose values are the number of duplicate rows
    collapsed into each output row.

    If the `key` keyword argument is given, the comparison is done on
    the given key instead of the full row.

    See also :func:`petl.transform.dedup.duplicates`,
    :func:`petl.transform.dedup.unique`,
    :func:`petl.transform.reductions.groupselectfirst`,
    :func:`petl.transform.reductions.groupselectlast`.

    """

    sort_opts = dict(presorted=presorted, buffersize=buffersize,
                     tempdir=tempdir, cache=cache)
    return DistinctView(table, key=key, count=count, **sort_opts)


Table.distinct = distinct
class DistinctView(Table):
    """
    Lazy view yielding only the first row of each group of rows that
    compare equal under `key` (or equal in full when `key` is None).

    If `count` is truthy it names an extra field appended to each
    output row holding the number of input rows collapsed into it.
    """

    def __init__(self, table, key=None, count=None, presorted=False,
                 buffersize=None, tempdir=None, cache=True):
        # sort first unless the caller guarantees the data are ordered
        if presorted:
            self.table = table
        else:
            self.table = sort(table, key=key, buffersize=buffersize,
                              tempdir=tempdir, cache=cache)
        self.key = key
        self.count = count

    def __iter__(self):
        it = iter(self.table)
        hdr = next(it)
        # convert field selection into field indices
        if self.key is None:
            indices = range(len(hdr))
        else:
            indices = asindices(hdr, self.key)
        # N.B., this may raise an exception on short rows, depending on
        # the field selection
        getkey = operator.itemgetter(*indices)
        # sentinel guaranteed to differ from any row value
        INIT = object()
        if self.count:
            hdr = tuple(hdr) + (self.count,)
            yield hdr
            previous = INIT
            n_dup = 1
            for row in it:
                if previous is INIT:
                    previous = row
                else:
                    kprev = getkey(previous)
                    kcurr = getkey(row)
                    if kprev == kcurr:
                        n_dup += 1
                    else:
                        yield tuple(previous) + (n_dup,)
                        n_dup = 1
                        previous = row
            # deal with the last group; guard against an empty table
            # (bug fix: previously tuple(INIT) raised TypeError when
            # there were no data rows at all)
            if previous is not INIT:
                yield tuple(previous) + (n_dup,)
        else:
            yield tuple(hdr)
            previous_keys = INIT
            for row in it:
                keys = getkey(row)
                if keys != previous_keys:
                    yield tuple(row)
                previous_keys = keys
def isunique(table, field):
    """
    Return True if there are no duplicate values for the given field(s),
    otherwise False.

    The `field` argument can be a single field name or index (starting
    from zero) or a tuple of field names and/or indexes.

    """

    seen = set()
    for v in itervalues(table, field):
        if v in seen:
            return False
        seen.add(v)
    return True


Table.isunique = isunique