# iterable_dataset.py
import copy
import itertools
from collections import Counter
from copy import deepcopy
from dataclasses import dataclass
from itertools import cycle, islice
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import numpy as np
import pyarrow as pa
from .arrow_dataset import DatasetInfoMixin
from .features import Features, Value
from .features.features import FeatureType
from .formatting import PythonFormatter
from .info import DatasetInfo
from .splits import NamedSplit
from .table import table_cast
def _infer_features_from_batch(batch: Dict[str, list], try_features: Optional[Features] = None) -> Features:
pa_table = pa.Table.from_pydict(batch)
if try_features is not None:
try:
pa_table = table_cast(pa_table, pa.schema(try_features.type))
except (TypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError):
pass
return Features.from_arrow_schema(pa_table.schema)
def _examples_to_batch(examples: List[Dict[str, Any]]) -> Dict[str, list]:
cols = sorted(examples[0].keys())
arrays = []
for col in cols:
arrays.append([example[col] for example in examples])
return dict(zip(cols, arrays))
def _batch_to_examples(batch: Dict[str, list]) -> Iterator[Dict[str, Any]]:
"""Convert a batch (dict of lists) to an iterator of examples (dicts)"""
n_examples = len(batch[next(iter(batch))])
for i in range(n_examples):
yield {col: array[i] for col, array in batch.items()}
class _BaseExamplesIterable:
"""Base class for the examples iterable used by an IterableDataset"""
def __iter__(self):
"""An examples iterable should yield tuples (example_key, example) of type (int/str, dict)"""
raise NotImplementedError(f"{type(self)} doesn't implement __iter__ yet")
def shuffle_data_sources(self, generator: np.random.Generator) -> "_BaseExamplesIterable":
"""
Either shuffle the shards/sources of the dataset, or propagate the shuffling to the underlying iterable.
If the order of the shards must stay fixed (when using .skip or .take for example), then this method returns self.
"""
raise NotImplementedError(f"{type(self)} doesn't implement shuffle_data_sources yet")
def shard_data_sources(self, shard_idx: int) -> "_BaseExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet")
@property
def n_shards(self) -> int:
raise NotImplementedError(f"{type(self)} doesn't implement n_shards yet")
def _shuffle_kwargs(rng: np.random.Generator, kwargs: dict) -> dict:
"""Return a shuffled copy of the input kwargs"""
# We must shuffle all the lists, and lists of the same size must have the same shuffling.
# This way entangled lists of (shard, shard_metadata) are still in the right order.
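# For example (illustrative values): {"files": ["a", "b", "c"], "sizes": [10, 20, 30]} may become
# {"files": ["c", "a", "b"], "sizes": [30, 10, 20]}: lists of the same length get the same permutation.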
# First, let's generate the shuffled indices per list size
list_sizes = set(len(value) for value in kwargs.values() if isinstance(value, list))
indices_per_size = {}
for size in list_sizes:
indices_per_size[size] = list(range(size))
rng.shuffle(indices_per_size[size])
# Now let's copy the kwargs and shuffle the lists based on their sizes
shuffled_kwargs = dict(kwargs)
for key, value in shuffled_kwargs.items():
if isinstance(value, list):
shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
return shuffled_kwargs
def _shard_kwargs(shard_idx: int, kwargs: dict) -> dict:
"""Return a copy of the input kwargs but with only one shard"""
# Having lists of different sizes makes sharding ambiguous, raise an error in this case
# until we decide how to define sharding without ambiguity for users
lists_lengths = {key: len(value) for key, value in kwargs.items() if isinstance(value, list)}
if len(set(lists_lengths.values())) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
+ "\nTo fix this, check the dataset script 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
)
)
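# For example (illustrative values): shard_idx=1 with {"files": ["a", "b", "c"], "split": "train"}
# gives {"files": ["b"], "split": "train"}: only lists are sharded, the other values are kept as-is.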
return {key: [value[shard_idx]] if isinstance(value, list) else value for key, value in kwargs.items()}
class ExamplesIterable(_BaseExamplesIterable):
def __init__(self, generate_examples_fn: Callable, kwargs: dict):
self.generate_examples_fn = generate_examples_fn
self.kwargs = kwargs
def __iter__(self):
yield from self.generate_examples_fn(**self.kwargs)
def shuffle_data_sources(self, generator: np.random.Generator) -> "ExamplesIterable":
return ShardShuffledExamplesIterable(self.generate_examples_fn, self.kwargs, generator)
def shard_data_sources(self, shard_idx: int) -> "MappedExamplesIterable":
"""Keep only the requested shard."""
kwargs_with_requested_data_source = _shard_kwargs(shard_idx, self.kwargs)
yield from self.generate_examples_fn(**kwargs_with_requested_data_source)
@property
def n_shards(self) -> int:
max_length = max((len(value) for value in self.kwargs.values() if isinstance(value, list)), default=0)
return max(1, max_length)
class ShardShuffledExamplesIterable(ExamplesIterable):
def __init__(self, generate_examples_fn: Callable, kwargs: dict, generator: np.random.Generator):
super().__init__(generate_examples_fn, kwargs)
self.generator = deepcopy(generator)
def __iter__(self):
"""Shuffle the kwargs order to shuffle shards"""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_kwargs(rng, self.kwargs)
yield from self.generate_examples_fn(**kwargs_with_shuffled_shards)
def shard_data_sources(self, shard_idx: int) -> "ExamplesIterable":
"""Keep only the requested shard."""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_kwargs(rng, self.kwargs)
kwargs_with_requested_data_source = _shard_kwargs(shard_idx, kwargs_with_shuffled_shards)
return ExamplesIterable(self.generate_examples_fn, kwargs_with_requested_data_source)
class CyclingMultiSourcesExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
self.ex_iterables = ex_iterables
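# Illustrative cycling order: with two sources A and B, iteration yields A[0], B[0], A[1], B[1], ...
# and stops as soon as one of the sources runs out of examples (see the StopIteration handling below).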
def __iter__(self):
iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
# this is an infinite iterator to keep track of which iterator we want to pick examples from
indices_iterator = cycle(range(len(iterators)))
for i in indices_iterator:
try: # let's pick one example from the iterator at index i
yield next(iterators[i])
except StopIteration: # if we ran out of examples on this iterator, break the main for loop
break
def shuffle_data_sources(self, generator: np.random.Generator) -> "CyclingMultiSourcesExamplesIterable":
"""Shuffle each underlying examples iterable."""
ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
return CyclingMultiSourcesExamplesIterable(ex_iterables)
@property
def n_shards(self) -> int:
return sum(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
def shard_data_sources(self, shard_idx: int) -> "CyclingMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
raise NotImplementedError("Sharding a CyclingMultiSourcesExamplesIterable is not implemented")
class VerticallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
"""
VerticallyConcatenatedMultiSourcesExamplesIterable simply chains the input iterables.
It doesn't require the examples iterables to always yield the same columns.
Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
For reference, `IterableDataset` merges the features of all the datasets to concatenate into one.
We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
This is done with `_apply_feature_types`.
"""
def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
self.ex_iterables = ex_iterables
def __iter__(self):
for ex_iterable in self.ex_iterables:
yield from ex_iterable
def shuffle_data_sources(
self, generator: np.random.Generator
) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
"""Shuffle the list of examples iterable, as well as each underlying examples iterable."""
rng = deepcopy(generator)
ex_iterables = list(self.ex_iterables)
rng.shuffle(ex_iterables)
ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in ex_iterables]
return VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
@property
def n_shards(self) -> int:
return sum(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
def shard_data_sources(self, shard_idx: int) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
raise NotImplementedError("Sharding a VerticallyConcatenatedMultiSourcesExamplesIterable is not implemented")
def _check_column_names(column_names: List[str]):
"""Check the column names to make sure they don't contain duplicates."""
counter = Counter(column_names)
if not all(count == 1 for count in counter.values()):
duplicated_columns = [col for col in counter if counter[col] > 1]
raise ValueError(
f"The examples iterables can't have duplicated columns but columns {duplicated_columns} are duplicated."
)
class HorizontallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
"""
HorizontallyConcatenatedMultiSourcesExamplesIterable merges the examples of the input iterables: at each step it takes one example from each iterable and combines their columns into a single example.
It also checks that there are no duplicate columns (otherwise we don't know which one to keep).
This check is done once when yielding the first example.
However it doesn't fill missing columns with None.
Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
For reference, `IterableDataset` merges the features of all the datasets to concatenate into one.
We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
This is done with `_apply_feature_types`.
"""
def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
self.ex_iterables = ex_iterables
def __iter__(self):
ex_iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
for i in itertools.count():
keys = []
examples = []
for ex_iterator in list(ex_iterators):
try:
key, example = next(ex_iterator)
keys.append(key)
examples.append(example)
except StopIteration:
ex_iterators.remove(ex_iterator)
if ex_iterators:
if i == 0:
_check_column_names([column_name for example in examples for column_name in example])
new_example = {}
for example in examples:
new_example.update(example)
new_key = "_".join(str(key) for key in keys)
yield new_key, new_example
else:
break
def shuffle_data_sources(
self, generator: np.random.Generator
) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
"""Doesn't shuffle the wrapped examples iterable since it would break the alignment between them."""
return self
@property
def n_shards(self) -> int:
return 1
def shard_data_sources(self, shard_idx: int) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
raise NotImplementedError("Sharding a HorizontallyConcatenatedMultiSourcesExamplesIterable is not implemented")
class RandomlyCyclingMultiSourcesExamplesIterable(CyclingMultiSourcesExamplesIterable):
def __init__(self, ex_iterables, generator: np.random.Generator, probabilities: Optional[List[float]] = None):
super().__init__(ex_iterables)
self.generator = deepcopy(generator)
self.probabilities = probabilities
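# Illustrative sampling: with probabilities=[0.8, 0.2], about 80% of the yielded examples come from the
# first source and 20% from the second; with probabilities=None, the sources are sampled uniformly.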
@staticmethod
def _iter_random_indices(
rng: np.random.Generator,
num_sources: int,
random_batch_size=1000,
p: Optional[List[float]] = None,
) -> Iterator[int]:
"""Get an infinite iterator that randomly samples the index of the source to pick examples from."""
if p is None:
while True:
yield from (int(i) for i in rng.integers(0, num_sources, size=random_batch_size))
else:
while True:
yield from (int(i) for i in rng.choice(num_sources, size=random_batch_size, p=p))
def __iter__(self):
rng = deepcopy(self.generator)
iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
# this is an infinite iterator that randomly samples the index of the source to pick examples from
indices_iterator = self._iter_random_indices(rng, len(iterators), p=self.probabilities)
for i in indices_iterator:
try: # let's pick one example from the iterator at index i
yield next(iterators[i])
except StopIteration: # if we ran out of examples on this iterator, break the main for loop
break
def shuffle_data_sources(self, generator: np.random.Generator) -> "RandomlyCyclingMultiSourcesExamplesIterable":
"""Shuffle the data sources of each wrapped examples iterable."""
ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
return RandomlyCyclingMultiSourcesExamplesIterable(
ex_iterables, generator=generator, probabilities=self.probabilities
)
def shard_data_sources(self, shard_idx: int) -> "RandomlyCyclingMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
raise NotImplementedError("Sharding a RandomlyCyclingMultiSourcesExamplesIterable is not implemented")
class MappedExamplesIterable(_BaseExamplesIterable):
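"""Wraps an examples iterable and applies `function` to its examples on the fly, either one example at a time or on batches of `batch_size` examples if `batched=True`. Used by `IterableDataset.map`."""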
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
function: Callable,
with_indices: bool = False,
input_columns: Optional[List[str]] = None,
batched: bool = False,
batch_size: int = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[List[str]] = None,
):
self.ex_iterable = ex_iterable
self.function = function
self.batched = batched
self.batch_size = batch_size
self.drop_last_batch = drop_last_batch
self.remove_columns = remove_columns
self.with_indices = with_indices
self.input_columns = input_columns
def __iter__(self):
iterator = iter(self.ex_iterable)
current_idx = 0
if self.batched:
for key, example in iterator:
# If batched, first build the batch
key_examples_list = [(key, example)] + [
(key, example) for key, example in islice(iterator, self.batch_size - 1)
]
keys, examples = zip(*key_examples_list)
if self.drop_last_batch and len(examples) < self.batch_size: # ignore last batch
return
batch = _examples_to_batch(examples)
# then apply the transform
inputs = batch
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append([current_idx + i for i in range(len(key_examples_list))])
transformed_batch = dict(batch) # this will be updated with the function output
transformed_batch.update(self.function(*function_args))
# then remove the unwanted columns
if self.remove_columns:
for c in self.remove_columns:
del transformed_batch[c]
if transformed_batch:
first_col = next(iter(transformed_batch))
bad_cols = [
col
for col in transformed_batch
if len(transformed_batch[col]) != len(transformed_batch[first_col])
]
if bad_cols:
raise ValueError(
f"Column lengths mismatch: columns {bad_cols} have length {[len(transformed_batch[col]) for col in bad_cols]} while {first_col} has length {len(transformed_batch[first_col])}."
)
# the new key is the concatenation of the examples keys from the batch
new_key = "_".join(str(key) for key in keys)
# yield one example at a time from the transformed batch
for batch_idx, example in enumerate(_batch_to_examples(transformed_batch)):
yield new_key, example
current_idx += batch_idx + 1
else:
for key, example in iterator:
# If not batched, we can apply the transform and yield the example directly
# first copy the example, since we might drop some keys
example = dict(example)
# then apply the transform
inputs = example
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append(current_idx)
transformed_example = dict(example) # this will be updated with the function output
transformed_example.update(self.function(*function_args))
# then we remove the unwanted columns
if self.remove_columns:
for c in self.remove_columns:
del transformed_example[c]
yield key, transformed_example
current_idx += 1
def shuffle_data_sources(self, generator: np.random.Generator) -> "MappedExamplesIterable":
"""Shuffle the wrapped examples iterable."""
return MappedExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator),
function=self.function,
with_indices=self.with_indices,
input_columns=self.input_columns,
batched=self.batched,
batch_size=self.batch_size,
drop_last_batch=self.drop_last_batch,
remove_columns=self.remove_columns,
)
def shard_data_sources(self, shard_idx: int) -> "MappedExamplesIterable":
"""Keep only the requested shard."""
return MappedExamplesIterable(
self.ex_iterable.shard_data_sources(shard_idx),
function=self.function,
with_indices=self.with_indices,
input_columns=self.input_columns,
batched=self.batched,
batch_size=self.batch_size,
drop_last_batch=self.drop_last_batch,
remove_columns=self.remove_columns,
)
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
class FilteredExamplesIterable(_BaseExamplesIterable):
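"""Wraps an examples iterable and only yields the examples for which `function` returns True, evaluated one example at a time or on batches of `batch_size` examples if `batched=True`. Used by `IterableDataset.filter`."""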
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
function: Callable,
with_indices: bool = False,
input_columns: Optional[List[str]] = None,
batched: bool = False,
batch_size: int = 1000,
):
self.ex_iterable = ex_iterable
self.function = function
self.batched = batched
self.batch_size = batch_size
self.with_indices = with_indices
self.input_columns = input_columns
def __iter__(self):
iterator = iter(self.ex_iterable)
current_idx = 0
if self.batched:
for key, example in iterator:
# If batched, first build the batch
key_examples_list = [(key, example)] + [
(key, example) for key, example in islice(iterator, self.batch_size - 1)
]
keys, examples = zip(*key_examples_list)
batch = _examples_to_batch(examples)
# then compute the mask for the batch
inputs = batch
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append([current_idx + i for i in range(len(key_examples_list))])
mask = self.function(*function_args)
# yield one example at a time from the batch
for batch_idx, (key_example, to_keep) in enumerate(zip(key_examples_list, mask)):
if to_keep:
yield key_example
current_idx += batch_idx + 1
else:
for key, example in iterator:
# If not batched, we can apply the filtering function directly
inputs = dict(example)
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append(current_idx)
to_keep = self.function(*function_args)
if to_keep:
yield key, example
current_idx += 1
def shuffle_data_sources(self, generator: np.random.Generator) -> "FilteredExamplesIterable":
"""Shuffle the wrapped examples iterable."""
return FilteredExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator),
function=self.function,
with_indices=self.with_indices,
input_columns=self.input_columns,
batched=self.batched,
batch_size=self.batch_size,
)
def shard_data_sources(self, shard_idx: int) -> "FilteredExamplesIterable":
"""Keep only the requested shard."""
return FilteredExamplesIterable(
self.ex_iterable.shard_data_sources(shard_idx),
function=self.function,
with_indices=self.with_indices,
input_columns=self.input_columns,
batched=self.batched,
batch_size=self.batch_size,
)
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
class BufferShuffledExamplesIterable(_BaseExamplesIterable):
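"""Wraps an examples iterable and shuffles its examples on the fly using a fixed-size buffer of `buffer_size` examples. Used by `IterableDataset.shuffle`."""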
def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator):
self.ex_iterable = ex_iterable
self.buffer_size = buffer_size
self.generator = generator
@staticmethod
def _iter_random_indices(rng: np.random.Generator, buffer_size: int, random_batch_size=1000) -> Iterator[int]:
while True:
yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size))
def __iter__(self):
buffer_size = self.buffer_size
rng = deepcopy(self.generator)
indices_iterator = self._iter_random_indices(rng, buffer_size)
# this is the shuffle buffer that we keep in memory
mem_buffer = []
for x in self.ex_iterable:
if len(mem_buffer) == buffer_size: # if the buffer is full, pick an example from it
i = next(indices_iterator)
yield mem_buffer[i]
mem_buffer[i] = x # replace the picked example by a new one
else: # otherwise, keep filling the buffer
mem_buffer.append(x)
# when we run out of examples, we shuffle the remaining examples in the buffer and yield them
rng.shuffle(mem_buffer)
yield from mem_buffer
def shuffle_data_sources(self, generator: np.random.Generator) -> "BufferShuffledExamplesIterable":
"""Shuffle the wrapped examples iterable as well as the shuffling buffer."""
return BufferShuffledExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator), buffer_size=self.buffer_size, generator=generator
)
def shard_data_sources(self, shard_idx: int) -> "BufferShuffledExamplesIterable":
"""Keep only the requested shard."""
return BufferShuffledExamplesIterable(
self.ex_iterable.shard_data_sources(shard_idx), buffer_size=self.buffer_size, generator=self.generator
)
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
class SkipExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
self.ex_iterable = ex_iterable
self.n = n
def __iter__(self):
ex_iterator = iter(self.ex_iterable)
for _ in islice(ex_iterator, self.n):
pass
yield from ex_iterator
def shuffle_data_sources(self, generator: np.random.Generator) -> "SkipExamplesIterable":
"""Doesn't shuffle the wrapped examples iterable since it would skip examples from other shards instead."""
return self
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
class TakeExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
self.ex_iterable = ex_iterable
self.n = n
def __iter__(self):
yield from islice(self.ex_iterable, self.n)
def shuffle_data_sources(self, generator: np.random.Generator) -> "TakeExamplesIterable":
"""Doesn't shuffle the wrapped examples iterable since it would take examples from other shards instead."""
return self
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
def _apply_feature_types(
example: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
) -> dict:
example = dict(example)
# add missing columns
for column_name in features:
if column_name not in example:
example[column_name] = None
# we encode the example for ClassLabel feature types for example
encoded_example = features.encode_example(example)
# Decode example for Audio feature, e.g.
decoded_example = features.decode_example(encoded_example, token_per_repo_id=token_per_repo_id)
return decoded_example
class TypedExamplesIterable(_BaseExamplesIterable):
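"""Wraps an examples iterable and applies the feature types in `features` to each example: missing columns are filled with None, then the example is encoded and decoded with `_apply_feature_types`."""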
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
features: Features,
token_per_repo_id: Dict[str, Union[str, bool, None]],
):
self.ex_iterable = ex_iterable
self.features = features
self.token_per_repo_id = token_per_repo_id
def __iter__(self):
# Then for each example, `TypedExamplesIterable` automatically fills missing columns with None.
# This is done with `_apply_feature_types`.
for key, example in self.ex_iterable:
yield key, _apply_feature_types(example, self.features, token_per_repo_id=self.token_per_repo_id)
def shuffle_data_sources(self, generator: np.random.Generator) -> "TypedExamplesIterable":
"""Shuffle the wrapped examples iterable."""
return TypedExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator),
features=self.features,
token_per_repo_id=self.token_per_repo_id,
)
def shard_data_sources(self, shard_idx: int) -> "TypedExamplesIterable":
"""Keep only the requested shard."""
return TypedExamplesIterable(
self.ex_iterable.shard_data_sources(shard_idx),
features=self.features,
token_per_repo_id=self.token_per_repo_id,
)
@property
def n_shards(self) -> int:
return self.ex_iterable.n_shards
def _generate_examples_from_tables_wrapper(generate_tables_fn):
def wrapper(**kwargs):
python_formatter = PythonFormatter()
for key, table in generate_tables_fn(**kwargs):
batch = python_formatter.format_batch(table)
for i, example in enumerate(_batch_to_examples(batch)):
yield f"{key}_{i}", example
return wrapper
@dataclass
class ShufflingConfig:
generator: np.random.Generator
class IterableDataset(DatasetInfoMixin):
"""A Dataset backed by an iterable."""
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
format_type: Optional[str] = None,
shuffling: Optional[ShufflingConfig] = None,
token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
):
info = info.copy() if info is not None else DatasetInfo()
DatasetInfoMixin.__init__(self, info=info, split=split)
self._ex_iterable = ex_iterable
self._format_type = format_type
self._shuffling = shuffling
self._epoch = 0
self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {}
def _head(self, n=5):
return _examples_to_batch([x for key, x in islice(self._iter(), n)])
def _effective_generator(self):
if self._shuffling and self._epoch == 0:
return self._shuffling.generator
elif self._shuffling:
# Create effective seed using self._epoch (we subtract in order to avoid overflow in long_scalars)
effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self._epoch
effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed
return np.random.default_rng(effective_seed)
else:
raise ValueError("This dataset is not shuffled")
@property
def n_shards(self) -> int:
return self._ex_iterable.n_shards
def _iter(self):
if self._shuffling:
ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator())
else:
ex_iterable = self._ex_iterable
yield from ex_iterable
def _iter_shard(self, shard_idx: int):
if self._shuffling:
ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator())
else:
ex_iterable = self._ex_iterable
yield from ex_iterable.shard_data_sources(shard_idx)
def __iter__(self):
for key, example in self._iter():
if self.features:
# `IterableDataset` automatically fills missing columns with None.
# This is done with `_apply_feature_types`.
yield _apply_feature_types(example, self.features, token_per_repo_id=self._token_per_repo_id)
else:
yield example
def with_format(
self,
type: Optional[str] = None,
) -> "IterableDataset":
"""
Return a dataset with the specified format.
This method only supports the "torch" format for now.
Args:
type (:obj:`str`, optional, default None): if set to "torch", the returned dataset
will be a subclass of torch.utils.data.IterableDataset to be used in a DataLoader
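Example (a minimal sketch; it assumes `torch` is installed and reuses the streaming dataset from the other examples in this file):
```py
>>> import torch
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> ds = ds.with_format("torch")
>>> dataloader = torch.utils.data.DataLoader(ds, batch_size=4)
```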
"""
# TODO(QL): add examples formatting to get tensors when using the "torch" format
# TODO(QL): add format_kwargs
# TODO(QL): add format_columns and return_all_columns
# TODO(QL): add pandas, numpy and tf formats
return iterable_dataset(
ex_iterable=self._ex_iterable,
info=self._info.copy(),
split=self._split,
format_type=type,
shuffling=copy.deepcopy(self._shuffling),
token_per_repo_id=self._token_per_repo_id,
)
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: int = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
) -> "IterableDataset":
"""
Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
If your function returns a column that already exists, then it overwrites it.
The function is applied on-the-fly on the examples when iterating over the dataset.
You can specify whether the function should be batched or not with the ``batched`` parameter:
- If batched is False, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. {"text": "Hello there !"}
- If batched is True and batch_size is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}
- If batched is True and batch_size is ``n`` > 1, then the function takes a batch of ``n`` examples as input and can return a batch with ``n`` examples, or with an arbitrary number of examples.
Note that the last batch may have fewer than ``n`` examples.
A batch is a dictionary, e.g. a batch of ``n`` examples is {"text": ["Hello there !"] * n}
Args:
function (:obj:`Callable`, optional, default None): Function applied on-the-fly on the examples when you iterate on the dataset
It must have one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, default to identity function: ``lambda x: x``.
with_indices (:obj:`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
input_columns (`Optional[Union[str, List[str]]]`, default `None`): The columns to be passed into `function`
as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (:obj:`bool`, default `False`): Provide batch of examples to `function`.
batch_size (:obj:`int`, optional, default ``1000``): Number of examples per batch provided to `function` if `batched=True`.
drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.
Columns will be removed after the examples are updated with the output of `function`, i.e. if `function` adds
columns with names in `remove_columns`, these columns will be removed as well.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> list(ds.take(3))
[{'label': 1,
'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'Review: effective but too-tepid biopic'}]
```
"""
if isinstance(input_columns, str):
input_columns = [input_columns]
if isinstance(remove_columns, str):
remove_columns = [remove_columns]
if function is None:
function = lambda x: x # noqa: E731
info = self._info.copy()
info.features = None
ex_iterable = MappedExamplesIterable(
TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
if self._info.features is not None
else self._ex_iterable,
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
)
return iterable_dataset(
ex_iterable=ex_iterable,
info=info,
split=self._split,
format_type=self._format_type,
shuffling=copy.deepcopy(self._shuffling),
token_per_repo_id=self._token_per_repo_id,
)
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
) -> "IterableDataset":
"""Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
The filtering is done on-the-fly when iterating over the dataset.
Args:
function (:obj:`Callable`): Callable with one of the following signatures:
- ``function(example: Dict[str, Any]) -> bool`` if ``with_indices=False, batched=False``
- ``function(example: Dict[str, Any], indices: int) -> bool`` if ``with_indices=True, batched=False``
- ``function(example: Dict[str, List]) -> List[bool]`` if ``with_indices=False, batched=True``
- ``function(example: Dict[str, List], indices: List[int]) -> List[bool]`` if ``with_indices=True, batched=True``
If no function is provided, defaults to an always True function: ``lambda x: True``.
with_indices (:obj:`bool`, default `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (:obj:`str` or `List[str]`, optional): The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (:obj:`bool`, defaults to `False`): Provide batch of examples to `function`
batch_size (:obj:`int`, optional, default ``1000``): Number of examples per batch provided to `function` if `batched=True`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> ds = ds.filter(lambda x: x["label"] == 0)
>>> list(ds.take(3))
[{'label': 0, 'text': 'simplistic , silly and tedious .'},
{'label': 0,
'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
{'label': 0,
'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
```
"""
if isinstance(input_columns, str):
input_columns = [input_columns]
# TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example)
info = copy.deepcopy(self._info)
info.features = None
# We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here
ex_iterable = FilteredExamplesIterable(
TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
if self._info.features is not None
else self._ex_iterable,
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
)
return iterable_dataset(
ex_iterable=ex_iterable,
info=info,
split=self._split,
format_type=self._format_type,
shuffling=copy.deepcopy(self._shuffling),
token_per_repo_id=self._token_per_repo_id,
)
def shuffle(
self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
) -> "IterableDataset":
"""
Randomly shuffles the elements of this dataset.
This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer,
replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
equal to the full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but ``buffer_size`` is set to 1,000, then shuffle will
initially select a random element from only the first 1,000 elements in the buffer. Once an element is
selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1,000 element buffer.
If the dataset is made of several shards, it also shuffles the order of the shards.
However if the order has been fixed by using :func:`datasets.IterableDataset.skip` or :func:`datasets.IterableDataset.take`
then the order of the shards is kept unchanged.
Args:
seed (:obj:`int`, optional, default None): random seed that will be used to shuffle the dataset.
It is used to sample from the shuffle buffer and also to shuffle the data shards.
generator (:obj:`numpy.random.Generator`, optional): Numpy random Generator to use to compute the permutation of the dataset rows.
If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy).
buffer_size (:obj:`int`, default 1000): size of the buffer.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> shuffled_ds = ds.shuffle(seed=42)
>>> list(shuffled_ds.take(3))
[{'label': 1,
'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
{'label': 1,
'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
{'label': 1,
'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
```
"""
if generator is None:
generator = np.random.default_rng(seed)
else:
generator = deepcopy(generator)
shuffling = ShufflingConfig(generator=generator)
return iterable_dataset(
ex_iterable=BufferShuffledExamplesIterable(
self._ex_iterable, buffer_size=buffer_size, generator=generator
).shuffle_data_sources(generator),
info=self._info.copy(),
split=self._split,
format_type=self._format_type,
shuffling=shuffling,
token_per_repo_id=self._token_per_repo_id,
)
def set_epoch(self, epoch: int):
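"""Set the current epoch. When the dataset is shuffled, `_effective_generator` derives a different effective seed from the epoch, so the data is reshuffled differently at each epoch."""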
self._epoch = epoch
def skip(self, n) -> "IterableDataset":
"""
Create a new IterableDataset that skips the first ``n`` elements.
Args:
n (:obj:`int`): number of elements to skip.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> ds = ds.skip(1)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},