# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of algorithms required for Lattice layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
import math
from . import utils
from absl import logging
import numpy as np
import six
import tensorflow as tf
def evaluate_with_simplex_interpolation(inputs, kernel, units, lattice_sizes,
clip_inputs):
"""Evaluates a lattice using simplex interpolation.
Within each cell of the lattice, we partition the hypercube into d! simplices,
where each simplex has d+1 vertices. Each simplex (relative to the lower
corner of the hypercube) includes the all-zeros vertex, a vertex with a
single one, a vertex with two ones, ... and the all-ones vertex.
For example, for a three-dimensional unit hypercube the 3! = 6 simplices are:
[0,0,0], [0,0,1], [0,1,1], [1,1,1]
[0,0,0], [0,0,1], [1,0,1], [1,1,1]
[0,0,0], [0,1,0], [0,1,1], [1,1,1]
[0,0,0], [0,1,0], [1,1,0], [1,1,1]
[0,0,0], [1,0,0], [1,1,0], [1,1,1]
[0,0,0], [1,0,0], [1,0,1], [1,1,1]
A point x in the hypercube is contained in the simplex corresponding to the
order of x's components. For example, x = [0.4,0.2,0.8] is contained in the
simplex specified by [2,0,1] (second in the above list). The weight associated
with each vertex in the simplex is the difference between the decreasingly
sorted coordinates of the input. For details, see e.g. "Dissection of the
hypercube into simplices", D.G. Mead, Proceedings of the AMS, 76:2, Sep. 1979.
Args:
inputs: Tensor of shape: `(batch_size, ..., len(lattice_sizes))` or list of
`len(lattice_sizes)` tensors of same shape `(batch_size, ..., 1)` which
represents points to apply lattice interpolation to. A typical shape is
`(batch_size, len(lattice_sizes))`.
kernel: Lattice kernel of shape (num_params_per_lattice, units).
units: Output dimension of the lattice.
lattice_sizes: List or tuple of integers which represents lattice sizes of
layer for which interpolation is being computed.
clip_inputs: Whether inputs should be clipped to the input range of the
lattice.
Returns:
Tensor of shape: `(batch_size, ..., units)`.
"""
if isinstance(inputs, list):
inputs = tf.concat(inputs, axis=-1)
if clip_inputs:
inputs = _clip_onto_lattice_range(
inputs=inputs, lattice_sizes=lattice_sizes)
lattice_rank = len(lattice_sizes)
input_dim = len(inputs.shape)
all_size_2 = all(size == 2 for size in lattice_sizes)
# Strides are the changes in the global index (index into the flattened
# parameters) when moving across each dimension.
# E.g. for 2x2x2, strides are [4, 2, 1].
strides = tf.constant(
np.cumprod([1] + lattice_sizes[::-1][:-1])[::-1], tf.int32)
if not all_size_2:
# Find offset (into flattened parameters) for the lower corner of the
# hypercube that the input lands in.
lower_corner_coordinates = tf.cast(inputs, tf.int32)
# Avoid the corner case of landing on the outermost edge.
lower_corner_coordinates = tf.minimum(lower_corner_coordinates,
np.array(lattice_sizes) - 2)
# Multiplying coordinates by strides and summing up gives the index into
# the flattened parameter tensor.
# Note: Alternative method using tf.tensordot + tf.expand_dims is slower.
lower_corner_offset = tf.reduce_sum(
lower_corner_coordinates * strides, axis=-1, keepdims=True)
# Continue simplex interpolation with the residuals
inputs = inputs - tf.cast(lower_corner_coordinates, inputs.dtype)
# Get sorted values and indices.
# TODO: investigate if there is a way to avoid sorting twice.
sorted_indices = tf.argsort(inputs, direction="DESCENDING")
sorted_inputs = tf.sort(inputs, direction="DESCENDING")
# Simplex interpolation weights are the deltas between residuals.
no_padding_dims = [[0, 0]] * (input_dim - 1)
sorted_inputs_padded_left = tf.pad(
sorted_inputs, no_padding_dims + [[1, 0]], constant_values=1.)
sorted_inputs_padded_right = tf.pad(
sorted_inputs, no_padding_dims + [[0, 1]], constant_values=0.)
weights = sorted_inputs_padded_left - sorted_inputs_padded_right
# Calculate cumsum over the strides of sorted dimensions to get index of
# simplex vertices into the flattened lattice parameters.
sorted_strides = tf.gather(strides, sorted_indices)
if all_size_2:
# Lower corner offset is 0 for 2^d lattices.
corner_offset_and_sorted_strides = tf.pad(sorted_strides,
no_padding_dims + [[1, 0]])
else:
corner_offset_and_sorted_strides = tf.concat(
[lower_corner_offset, sorted_strides], axis=-1)
indices = tf.cumsum(corner_offset_and_sorted_strides, axis=-1)
# Get parameter values at the simplex indices.
if units == 1:
gathered_params = tf.gather(tf.reshape(kernel, [-1]), indices)
else:
# We now have two tensors 'indices' and 'weights' of shape (batch, units).
# The kernel is of shape (num_params_per_lattice, units).
# In order to use tf.gather, we need to convert 'indices' so that they are
# indices into the flattened parameter tensor.
# Note: Alternative method that uses a transpose on the parameters instead
# of a multiply on the indices is slower with typical batch sizes.
unit_offset = tf.constant([[i] * (lattice_rank + 1) for i in range(units)])
flat_indices = indices * units + unit_offset
gathered_params = tf.gather(tf.reshape(kernel, [-1]), flat_indices)
# Dot product with interpolation weights.
# Note: Alternative method using tf.einsum is slightly slower on CPU.
return tf.reduce_sum(
tf.multiply(gathered_params, weights), axis=-1, keepdims=(units == 1))
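# Illustrative usage sketch (not part of the library; tensor values are made
# up). For a 2x2 lattice with flattened parameters [0, 1, 2, 3], the input
# [0.4, 0.2] falls in the simplex with dim 0 >= dim 1, and its simplex
# weights are [1-0.4, 0.4-0.2, 0.2-0] = [0.6, 0.2, 0.2]. Uncommented, the
# snippet below should evaluate to [[1.0]].
#
#   kernel = tf.constant([[0.0], [1.0], [2.0], [3.0]])  # (prod(sizes), units)
#   inputs = tf.constant([[0.4, 0.2]])                  # (batch_size, 2)
#   output = evaluate_with_simplex_interpolation(
#       inputs=inputs, kernel=kernel, units=1,
#       lattice_sizes=[2, 2], clip_inputs=True)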
def evaluate_with_hypercube_interpolation(inputs, kernel, units, lattice_sizes,
clip_inputs):
"""Evaluates a lattice using hypercube interpolation.
Lattice function is multi-linearly interpolated between the 2^d vertices of a
hypercube. This interpolation method is typically slower than simplex
interpolation, since each value is interpolated from 2^d hypercube corners,
rather than d+1 simplex corners. For details, see e.g. "Dissection of the
hypercube into simplices", D.G. Mead, Proceedings of the AMS, 76:2, Sep. 1979.
Args:
inputs: Tensor representing points to apply lattice interpolation to. If
units = 1, tensor should be of shape: `(batch_size, ...,
len(lattice_sizes))` or list of `len(lattice_sizes)` tensors of same
shape `(batch_size, ..., 1)`.
If units > 1, tensor should be of shape: `(batch_size, ..., units,
len(lattice_sizes))` or list of `len(lattice_sizes)` tensors of same
shape `(batch_size, ..., units, 1)`. A typical shape is `(batch_size,
len(lattice_sizes))`.
kernel: Lattice kernel of shape (num_params_per_lattice, units).
units: Output dimension of the lattice.
lattice_sizes: List or tuple of integers which represents lattice sizes of
layer for which interpolation is being computed.
clip_inputs: Whether inputs should be clipped to the input range of the
lattice.
Returns:
Tensor of shape: `(batch_size, ..., units)`.
"""
interpolation_weights = compute_interpolation_weights(
inputs=inputs, lattice_sizes=lattice_sizes, clip_inputs=clip_inputs)
if units == 1:
# Weights shape: (batch-size, ..., prod(lattice_sizes))
# Kernel shape: (prod(lattice_sizes), 1)
return tf.matmul(interpolation_weights, kernel)
else:
# Weights shape: (batch-size, ..., units, prod(lattice_sizes))
# Kernel shape: (prod(lattice_sizes), units)
return tf.reduce_sum(interpolation_weights * tf.transpose(kernel), axis=-1)
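# Illustrative usage sketch (not part of the library; tensor values are made
# up). Hypercube interpolation of the same 2x2 lattice uses all four corners:
# for input [0.4, 0.2] the multilinear weights are
# [0.6*0.8, 0.6*0.2, 0.4*0.8, 0.4*0.2], and with the linear kernel below the
# result matches simplex interpolation at [[1.0]].
#
#   kernel = tf.constant([[0.0], [1.0], [2.0], [3.0]])
#   inputs = tf.constant([[0.4, 0.2]])
#   output = evaluate_with_hypercube_interpolation(
#       inputs=inputs, kernel=kernel, units=1,
#       lattice_sizes=[2, 2], clip_inputs=True)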
# TODO: Rename and update usage.
def compute_interpolation_weights(inputs, lattice_sizes, clip_inputs=True):
"""Computes weights for hypercube lattice interpolation.
Running time: `O(batch_size * prod(lattice_sizes))`
If `clip_inputs == True`, inputs outside of the range defined by
`lattice_sizes` will be clipped into the lattice input range. If not, the
corresponding weights will linearly approach 0.0 with input moving away from
the valid input range.
Args:
inputs: Tensor of shape: `(batch_size, ..., len(lattice_sizes))` or list of
`len(lattice_sizes)` tensors of same shape `(batch_size, ..., 1)` which
represents points to apply lattice interpolation to. A typical shape is
`(batch_size, len(lattice_sizes))`.
lattice_sizes: List or tuple of integers which represents lattice sizes of
layer for which interpolation is being computed.
clip_inputs: Whether inputs should be clipped to the input range of the
lattice.
Raises:
ValueError: If last dimension of `inputs` does not match `lattice_sizes`.
Returns:
Interpolation weights tensor of shape:
`(batch_size, ..., prod(lattice_sizes))`.
"""
if isinstance(inputs, list):
input_shape = [tensor.shape for tensor in inputs]
input_dtype = inputs[0].dtype
else:
input_shape = inputs.shape
input_dtype = inputs.dtype
verify_hyperparameters(lattice_sizes=lattice_sizes, input_shape=input_shape)
# Special case: 2^d lattice with input passed in as a single tensor
if all(size == 2 for size in lattice_sizes) and not isinstance(inputs, list):
w = tf.stack([(1.0 - inputs), inputs], axis=-1)
if clip_inputs:
w = tf.clip_by_value(w, clip_value_min=0, clip_value_max=1)
one_d_interpolation_weights = tf.unstack(w, axis=-2)
return batch_outer_operation(one_d_interpolation_weights, operation="auto")
if clip_inputs:
inputs = _clip_onto_lattice_range(
inputs=inputs, lattice_sizes=lattice_sizes)
# Create interpolation keypoints in advance in order to reuse them for all
# dimensions of the same size.
dim_keypoints = {}
for dim_size in set(lattice_sizes):
dim_keypoints[dim_size] = tf.constant([i for i in range(dim_size)],
dtype=input_dtype)
# Bucketize in order to share interpolation ops across consecutive dims of
# same size.
bucketized_inputs = _bucketize_consequtive_equal_dims(
inputs=inputs, lattice_sizes=lattice_sizes)
one_d_interpolation_weights = []
for tensor, bucket_size, dim_size in bucketized_inputs:
if bucket_size > 1:
# Within a bucket all dims have the same lattice size, so instead of
# splitting before interpolation we split after interpolation.
# Expand dims in order to make interpolation through broadcasting work.
tensor = tf.expand_dims(tensor, axis=-1)
# Broadcasting subtraction op.
distance = tf.abs(tensor - dim_keypoints[dim_size])
# The following ops compute the weights as follows:
# 1) if distance >= 1.0 then set interpolation weight to 0.0.
# 2) if distance < 1.0 then set interpolation weight to 1.0 - distance.
weights = 1.0 - tf.minimum(distance, 1.0)
if bucket_size == 1:
one_d_interpolation_weights.append(weights)
else:
one_d_interpolation_weights.extend(tf.unstack(weights, axis=-2))
return batch_outer_operation(one_d_interpolation_weights, operation="auto")
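# Illustrative usage sketch (not part of the library; values are made up).
# For a single dimension of size 3, keypoints sit at 0, 1 and 2, and each
# weight is the "tent" function 1 - min(|x - keypoint|, 1). The input below
# is halfway between keypoints 1 and 2, so the expected weights are
# [[0.0, 0.5, 0.5]].
#
#   weights = compute_interpolation_weights(
#       inputs=tf.constant([[1.5]]), lattice_sizes=[3])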
def batch_outer_operation(list_of_tensors, operation="auto"):
"""Computes outer operation of last dimensions of each of given tensors.
Args:
list_of_tensors: List of tensors of same shape `(batch_size, ..., k[i])`
where everything except `k[i]` matches.
operation: - binary TF operation which supports broadcasting to be applied.
- string "auto" in order to apply tf.multiply for first several tensors
and tf.matmul for remaining.
Returns:
Tensor of shape: `(batch_size, ..., mul_i(k[i]))`.
"""
# Alternative implementation using tf.einsum creates fewer graph nodes.
# This is slightly slower on CPU as of 2020/5, but the timing results might
# change with different setup/platform/hardware.
# Create a formula for outer product. e.g. '...a,...b,...c->...abc'
# if operation == "auto":
# n = len(list_of_tensors)
# chars = string.ascii_lowercase[:n]
# eqn = ",".join(["..." + c for c in chars]) + "->..." + "".join(chars)
# result = tf.einsum(eqn, *list_of_tensors)
# result_shape = [-1] + [int(size) for size in result.shape[1:]]
# output_shape = result_shape[:-n] + [np.prod(result_shape[-n:])]
# return tf.reshape(result, shape=output_shape)
if len(list_of_tensors) == 1:
return list_of_tensors[0]
# Dimensions of size 1 at position -1 of the first tensor and -2 of the
# second tensor result in an outer operation due to broadcasting.
result = tf.expand_dims(list_of_tensors[0], axis=-1)
for i, tensor in enumerate(list_of_tensors[1:]):
if operation == "auto":
# Threshold 6 determined empirically for 2^d lattices.
op = tf.multiply if i < 6 else tf.matmul
else:
op = operation
result = op(result, tf.expand_dims(tensor, axis=-2))
# For TF1 compatibility convert shape to integers allowing first dimension
# to be undefined.
#
# If we wanted to support an arbitrary number of undefined dimensions we
# would have to compute new_shape using tf ops, which is undesirable
# because we want to minimize graph size.
shape = [-1] + [int(size) for size in result.shape[1:]]
# Merge last 2 dimensions which we just multiplied.
new_shape = shape[:-2] + [shape[-2] * shape[-1]]
# Since we are reshaping anyway, append 1 to prepare 'result' for the
# following outer operation.
if i < len(list_of_tensors) - 2:
new_shape.append(1)
result = tf.reshape(result, shape=new_shape)
return result
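# Illustrative usage sketch (not part of the library; values are made up).
# The outer product of per-dimension weight vectors [a, b] and [c, d] for one
# batch element is flattened into [a*c, a*d, b*c, b*d], matching the
# flattened parameter ordering used by the interpolation functions above.
#
#   t0 = tf.constant([[0.6, 0.4]])  # (batch_size, k0)
#   t1 = tf.constant([[0.8, 0.2]])  # (batch_size, k1)
#   outer = batch_outer_operation([t0, t1], operation=tf.multiply)
#   # outer == [[0.48, 0.12, 0.32, 0.08]]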
def _clip_onto_lattice_range(inputs, lattice_sizes):
"""Clips inputs onto valid input range for given lattice_sizes.
Args:
inputs: `inputs` argument of `compute_interpolation_weights`.
lattice_sizes: list or tuple of integers which represents lattice sizes to
clip onto.
Returns:
Clipped `inputs`.
"""
if not isinstance(inputs, list):
upper_bounds = [dim_size - 1.0 for dim_size in lattice_sizes]
return tf.clip_by_value(
inputs,
clip_value_min=tf.zeros(shape=len(lattice_sizes), dtype=inputs.dtype),
clip_value_max=tf.constant(upper_bounds, dtype=inputs.dtype))
else:
# Share bound constant across dimensions of same size.
dim_upper_bounds = {}
for dim_size in set(lattice_sizes):
dim_upper_bounds[dim_size] = tf.constant(
dim_size - 1.0, dtype=inputs[0].dtype)
dim_lower_bound = tf.zeros(shape=[], dtype=inputs[0].dtype)
clipped_inputs = []
for one_d_input, dim_size in zip(inputs, lattice_sizes):
clipped_inputs.append(
tf.clip_by_value(
one_d_input,
clip_value_min=dim_lower_bound,
clip_value_max=dim_upper_bounds[dim_size]))
return clipped_inputs
def _bucketize_consequtive_equal_dims(inputs, lattice_sizes):
"""Groups consequite dimensions of same size together.
For example `lattice_sizes == [2, 2, 2, 5, 5, 2]` produce 3 buckets:
- bucket of size 3 which corresponds to first group of dimensions of size 2.
- bucket of size 2 which corresponds to group of dimensions of size 5.
- bucket of size 1 which corresponds to last dimension of size 2.
If `inputs` is a single tensor then it will be split accordig to buckets.
If `inputs` is a list of tensor then all buckets will be of size 1 regardless
of lattice sizes in order to avoid merging tensors. In this case function acts
merely as a convenience helper to unify output format.
Args:
inputs: `inputs` argument of `compute_interpolation_weights`.
lattice_sizes: list or tuple of integers which represents lattice sizes.
Returns:
Iterable of tuples: `(tensor, bucket_size, bucket_dim_size)` where
`tensor.shape[-1] == bucket_size` and `bucket_dim_size` is a lattice size
which corresponds to bucket.
"""
if not isinstance(inputs, list):
bucket_sizes = []
bucket_dim_sizes = []
current_size = 1
for i in range(1, len(lattice_sizes)):
if lattice_sizes[i] != lattice_sizes[i - 1]:
bucket_sizes.append(current_size)
bucket_dim_sizes.append(lattice_sizes[i - 1])
current_size = 1
else:
current_size += 1
bucket_sizes.append(current_size)
bucket_dim_sizes.append(lattice_sizes[-1])
inputs = tf.split(inputs, num_or_size_splits=bucket_sizes, axis=-1)
else:
# TODO: run benchmark and figure out whether it makes sense to merge
# individual tensors here.
bucket_sizes = [1] * len(lattice_sizes)
bucket_dim_sizes = lattice_sizes
return zip(inputs, bucket_sizes, bucket_dim_sizes)
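# Illustrative usage sketch (not part of the library; shapes are made up).
# For a single input tensor with lattice_sizes=[2, 2, 5], the first two
# dimensions share one bucket, so the snippet below would yield buckets of
# sizes [2, 1] with dimension sizes [2, 5].
#
#   inputs = tf.ones(shape=(8, 3))
#   buckets = list(_bucketize_consequtive_equal_dims(
#       inputs=inputs, lattice_sizes=[2, 2, 5]))
#   # [(tensor of shape (8, 2), 2, 2), (tensor of shape (8, 1), 1, 5)]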
def default_init_params(output_min, output_max):
"""Returns reasonable default parameters if not defined explicitly.
Args:
output_min: None or minimum layer output.
output_max: None or maximum layer output.
"""
if output_min is not None:
init_min = output_min
elif output_max is not None:
init_min = min(0.0, output_max)
else:
init_min = 0.0
if output_max is not None:
init_max = output_max
elif output_min is not None:
init_max = max(1.0, output_min)
else:
init_max = 1.0
# Return our min and max.
return init_min, init_max
def linear_initializer(lattice_sizes,
output_min,
output_max,
monotonicities=None,
unimodalities=None,
units=1,
dtype=tf.float32):
"""Returns a lattice layer weight tensor that represents a linear function.
- The linear function will have positive coefficients for monotonic dimensions
and 0 otherwise. If all dimensions are unconstrained, all coefficients will
be positive.
- Linear coefficients are set such that the minimum/maximum output of the
lattice matches the given output_min/output_max.
- Each monotonic dimension contributes with the same weight regardless of the
number of vertices per dimension.
- No dimension can be both monotonic and unimodal.
- Unimodal dimensions contribute with the same weight as monotonic dimensions.
- Unimodal dimensions linearly decrease for first `(dim_size + 1) // 2`
vertices and then linearly increase for following vertices.
Args:
lattice_sizes: List or tuple of integers which represents lattice sizes.
output_min: Minimum output of lattice layer after initialization.
output_max: Maximum output of lattice layer after initialization.
monotonicities: None or list or tuple of same length as lattice_sizes of {0,
1} which represents monotonicity constraints per dimension. 1 stands for
increasing (non-decreasing in fact), 0 for no monotonicity constraints.
unimodalities: None or list or tuple of same length as lattice_sizes of {-1,
0, 1} which represents unimodality constraints per dimension. 1 indicates
that function first decreases then increases, -1 indicates that function
first increases then decreases, 0 indicates no unimodality constraints.
units: Output dimension of the layer. Each of units lattices will be
initialized identically.
dtype: dtype.
Returns:
Lattice weights tensor of shape: `(prod(lattice_sizes), units)`.
"""
verify_hyperparameters(
lattice_sizes=lattice_sizes,
monotonicities=monotonicities,
unimodalities=unimodalities)
if monotonicities is None:
monotonicities = [0] * len(lattice_sizes)
if unimodalities is None:
unimodalities = [0] * len(lattice_sizes)
num_constraint_dims = utils.count_non_zeros(monotonicities, unimodalities)
if num_constraint_dims == 0:
monotonicities = [1] * len(lattice_sizes)
num_constraint_dims = len(lattice_sizes)
dim_range = float(output_max - output_min) / num_constraint_dims
one_d_weights = []
for monotonicity, unimodality, dim_size in zip(monotonicities, unimodalities,
lattice_sizes):
if monotonicity != 0:
one_d = _linspace(start=0.0, stop=dim_range, num=dim_size)
elif unimodality != 0:
decreasing = _linspace(start=dim_range, stop=0.0, num=(dim_size + 1) // 2)
increasing = _linspace(start=0.0, stop=dim_range, num=(dim_size + 1) // 2)
# For odd-sized dimensions we want just 1 extreme point; for even-sized we
# want 2.
if unimodality == 1:
one_d = decreasing + increasing[dim_size % 2:]
else:
one_d = increasing + decreasing[dim_size % 2:]
else:
one_d = [0.0] * dim_size
# Insert batch dim of size 1 at the beginning for batch_outer_operation.
one_d_weights.append(tf.constant(one_d, dtype=dtype, shape=[1, dim_size]))
# Use same implementation of outer operation as interpolation logic in order
# to guarantee same weights order.
weights = batch_outer_operation(one_d_weights, operation=tf.add)
weights = tf.reshape(weights + output_min, shape=[-1, 1])
if units > 1:
weights = tf.tile(weights, multiples=[1, units])
return weights
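# Illustrative usage sketch (not part of the library; values are made up).
# With no constraints given, every dimension is treated as monotonic and each
# contributes an equal share of the output range. For a 2x2 lattice with
# output in [0, 1], each dimension ranges over [0, 0.5], giving flattened
# weights [[0.0], [0.5], [0.5], [1.0]].
#
#   weights = linear_initializer(
#       lattice_sizes=[2, 2], output_min=0.0, output_max=1.0)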
def _linspace(start, stop, num):
"""Returns `num` uniformly spaced floats between `start` and `stop`."""
if num == 1:
return [start]
return [start + (stop - start) * i / (num - 1.0) for i in range(num)]
def random_monotonic_initializer(lattice_sizes,
output_min,
output_max,
units=1,
dtype=tf.float32):
"""Returns a uniformly random sampled monotonic lattice layer weight tensor.
- The uniform random monotonic function will initialize the lattice parameters
uniformly at random, such that the parameters are monotonically increasing
for each input.
- The random parameters will be sampled from `[output_min, output_max]`.
Args:
lattice_sizes: List or tuple of integers which represents lattice sizes.
output_min: Minimum output of lattice layer after initialization.
output_max: Maximum output of lattice layer after initialization.
units: Output dimension of the layer. Each of units lattices will be
initialized identically.
dtype: dtype.
Returns:
Lattice weights tensor of shape: `(prod(lattice_sizes), units)`.
"""
# First we verify parameters
verify_hyperparameters(lattice_sizes=lattice_sizes)
dimension = len(lattice_sizes)
# Pre-compute the bases of the global index for each dimension.
index_bases = [1] * dimension
for i in range(0, dimension - 1)[::-1]:
index_bases[i] = index_bases[i + 1] * lattice_sizes[i + 1]
total_lattice_size = np.prod(lattice_sizes)
# Create parameter indices to later gather parameter values in the proper
# ordering.
lattice_parameter_indices = [0] * total_lattice_size
# Starting from the all-zeros vertex, expand new vertices by taking the
# vertices that are children (in terms of monotonic dependencies) of the
# vertices expanded in the last iteration. This builds a constant tensor
# representing the initialization order, mapping each index to its
# corresponding random parameter value.
parameter_index = 1
# Vertices expanded in the last iteration.
last_vertices = [0]
while last_vertices:
new_vertices_set = set()
for index in last_vertices:
remaining_index = index
# For each dimension, if the vertex is not at the end of that dimension,
# we can create a child of the current vertex by increasing the value
# of the vertex in that dimension by one.
for i in range(dimension):
index_base = index_bases[i]
# The value of the vertex index in the i'th dimension
index_dim = remaining_index // index_base
if index_dim < lattice_sizes[i] - 1:
new_index = index + index_base
if new_index not in new_vertices_set:
new_vertices_set.add(new_index)
remaining_index = remaining_index % index_base
# Randomly shuffle the vertices expanded in the current iteration. Note that
# there can be no monotonic dependency between vertices expanded in the same
# iteration because the sums of their coordinates are the same (we increase
# coordinates one at a time in each iteration).
new_vertices = list(new_vertices_set)
np.random.shuffle(new_vertices)
# Assign parameter values
for vertex in new_vertices:
lattice_parameter_indices[vertex] = parameter_index
parameter_index += 1
last_vertices = new_vertices
# Convert lattice_parameter_indices into a tensor.
lattice_parameter_indices = tf.constant(lattice_parameter_indices)
# Uniformly generate the random parameter values.
parameter_values = tf.random.uniform(
shape=[total_lattice_size],
minval=output_min,
maxval=output_max,
dtype=dtype)
parameter_values = tf.sort(parameter_values)
# Convert lattice_parameter_indices to weights tensor and tile if necessary.
weights = tf.gather(parameter_values, lattice_parameter_indices)
weights = tf.reshape(weights, shape=[-1, 1])
if units > 1:
weights = tf.tile(weights, multiples=[1, units])
return weights
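# Illustrative usage sketch (not part of the library). The call below would
# draw 4 values uniformly from [0, 1], sort them, and assign them to the 2x2
# lattice vertices in an order consistent with monotonicity in both inputs
# (the all-zeros vertex gets the smallest value, the all-ones the largest).
#
#   weights = random_monotonic_initializer(
#       lattice_sizes=[2, 2], output_min=0.0, output_max=1.0)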
# TODO: Add final projection for unimodality constraints.
def _approximately_project_monotonicity(weights, lattice_sizes, monotonicities):
"""Approximately projects to strictly meet monotonicity constraints.
Algorithm details:
Definition:
A[i] refers to the i-th coordinate of vertex A.
For 2 vertices A and B:
"A <p B": if A[i] <= B[i] for all monotonic dimensions i. (aka dominated by
Pareto)
In order for lattice to be monotonic it is sufficient that either:
1) for any vertex V: weight[V] >= weight[X] for any vertex X that: X <p V.
or
2) for any vertex V: weight[V] <= weight[X] for any vertex X that: V <p X.
For example consider lattice:
```
0---1---2---3
| | | |
4---5---6---7
| | | |
8---9---10--11
```
For example, for vertex 6 it is sufficient that:
weight[6] >= max(weight[4, 5, 8, 9, 10])
Or:
weight[6] <= min(weight[2, 3, 7])
Given the above definition, we can use either of the following update rules to
approximately project into the feasible space:
max_proj[V] = max(weight[X]) for any X that: X <p V.
min_proj[V] = min(weight[X]) for any X that: V <p X.
Note, however, that each of these update rules either only increases weights
or only decreases them, while the true projection increases some weights and
decreases others. To get closer to a true projection, we modify and use both
update rules as follows:
1) half_proj[V] = weight[V] + (max_proj[V] - weight[V]) / 2
... move half way up towards max_proj.
2) min_max_proj[V] = min_proj[half_proj[V]]
... move the remaining way down towards min_proj.
Differs from _project_partial_monotonicity in that this algorithm guarantees a
global satisfying solution for all monotonicity constraints.
Args:
weights: Tensor with weights whose shape matches lattice_sizes.
lattice_sizes: List or tuple of integers which represents the lattice sizes
which correspond to weights.
monotonicities: List or tuple of same length as lattice_sizes of {0, 1}
which represents monotonicity constraints per dimension. 1 stands for
increasing (non-decreasing in fact), 0 for no monotonicity constraints.
Returns:
Tensor with projected weights matching shape of input weights.
"""
# To compute max_proj[V] for all V altogether compute cumulative maximum
# along every monotonic dimension in arbitrary order.
max_projection = weights
for dim in range(len(lattice_sizes)):
if monotonicities[dim] == 0:
continue
layers = tf.unstack(max_projection, axis=dim)
for i in range(1, len(layers)):
# Compute the cumulative maximum.
layers[i] = tf.maximum(layers[i], layers[i - 1])
max_projection = tf.stack(layers, axis=dim)
half_projection = (weights + max_projection) / 2.0
min_projection = half_projection
for dim in range(len(lattice_sizes)):
if monotonicities[dim] == 0:
continue
layers = tf.unstack(min_projection, axis=dim)
for i in range(len(layers) - 2, -1, -1):
# Compute the cumulative minimum in reversed order compared to the
# cumulative maximum above.
layers[i] = tf.minimum(layers[i], layers[i + 1])
min_projection = tf.stack(layers, axis=dim)
return min_projection
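# Illustrative usage sketch (not part of the library; values are made up).
# For a 1-d lattice of size 2 that is monotonic but has weights [1.0, 0.0],
# the max projection yields [1.0, 1.0], the half projection [1.0, 0.5], and
# the final min projection [0.5, 0.5], which satisfies the constraint.
#
#   projected = _approximately_project_monotonicity(
#       weights=tf.constant([1.0, 0.0]),
#       lattice_sizes=[2],
#       monotonicities=[1])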
def _approximately_project_edgeworth(weights, lattice_sizes, units,
edgeworth_trusts):
"""Approximately projects to strictly meet all edgeworth trust constraints.
Note that this function will not introduce violations to any
previously-satisfied monotonicity constraints.
Algorithm details:
For a constraint on main dimension i and conditional dimension j, consider
some slice of weights that is fixed along all other dimensions, leaving a grid
```
0---1---2---3
| | | |
4---5---6---7
| | | |
8---9---10--11
```
You can think of all the other dimensions as other such grids stacked behind
this one, e.g. weight[8] and the points behind it are all such points with
index 0 in the i'th and j'th dimensions, and weight[6] and the points behind
it are all such points with index 2 in the i'th dimension and index 1 in the
j'th.
To enforce this edgeworth trust constraint without messing up monotonicity or
other trust constraints, the key idea is that we will always translate all
points 'behind' a point on this grid together. This ensures that no other
trust constraints will be violated, since all other weight differences
constrained by trust constraints will occur 'behind' a single such point
(no conditional feature can also be a main feature).
With that in mind, we project to edgeworth trust on this grid while
maintaining monotonicity by working up and right and always increasing the
top-right point in each four-point square. Here, we would first find how much
we need to increase weight[5] by to maintain edgeworth trust on {4,5,8,9}. To
follow the principle above, we then consider all such squares 'behind'
{4,5,8,9} and find the biggest such difference. weight[5] and all points
behind will be increased by that amount, and then we continue until fixing the
top-right grid, {2,3,6,7}.
If the trust constraint is in the opposite direction, i.e. cond_direction =
-1, repeat all of the above except that we start in the top-right {2,3,6,7}
grid and always lower the bottom-left point (weight[6] to start) until we
reach the bottom-left {4,5,8,9} grid.
Differs from _project_partial_edgeworth in that this algorithm guarantees a
global satisfying solution for all edgeworth trust constraints.
Args:
weights: Tensor with weights whose shape matches lattice_sizes
plus units if units > 1.
lattice_sizes: List or tuple of integers which represents the lattice sizes
which correspond to weights.
units: Output dimension of the lattice.
edgeworth_trusts: None or iterable of three-element tuples. First element is
the index of the main (monotonic) feature. Second element is the index of
the conditional feature. Third element is the direction of trust: 1 if
higher values of the conditional feature should increase trust in the
main feature and -1 otherwise.
Returns:
Tensor with projected weights matching shape of input weights.
"""
# Project onto trust constraints by cumulatively fixing violations.
trust_projection = weights
for main_dim, cond_dim, cond_direction in edgeworth_trusts or []:
layers = _unstack_nd(trust_projection, [main_dim, cond_dim])
# Unlike other trust projections, cannot simply reverse layers beforehand
# based on cond_direction; asymmetry would break algorithm.
dims = len(layers[0][0].shape)
axis = (tf.constant(list(range(dims - 1)), dtype=tf.int32) if units > 1
else None)
if cond_direction > 0:
for i in range(0, lattice_sizes[main_dim] - 1):
for j in range(0, lattice_sizes[cond_dim] - 1):
difference_in_slopes = ((layers[i + 1][j] - layers[i][j]) -
(layers[i + 1][j + 1] - layers[i][j + 1]))
# Move all weights by the value of the biggest violation to both
# satisfy this constraint and not hurt others. See function comments
# for more details.
max_violation = tf.maximum(
tf.reduce_max(difference_in_slopes, axis=axis), 0)
layers[i + 1][j + 1] += max_violation
else:
for i in range(lattice_sizes[main_dim] - 2, -1, -1):
for j in range(lattice_sizes[cond_dim] - 2, -1, -1):
difference_in_slopes = ((layers[i + 1][j + 1] - layers[i][j + 1]) -
(layers[i + 1][j] - layers[i][j]))
max_violation = tf.maximum(
tf.reduce_max(difference_in_slopes, axis=axis), 0)
layers[i][j] -= max_violation
trust_projection = _stack_nd(layers, [main_dim, cond_dim])
return trust_projection
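# Illustrative usage sketch (not part of the library; values are made up).
# Consider a 2x2 lattice with main dimension 0, conditional dimension 1, and
# trust direction 1. With the weights below, the slope along the main
# dimension is 1.0 at cond=0 but only 0.5 at cond=1, violating edgeworth
# trust by 0.5; the projection would raise the top-right weight to 1.0.
#
#   w = tf.constant([[0.0, 0.0],
#                    [1.0, 0.5]])   # w[main][cond]
#   projected = _approximately_project_edgeworth(
#       w, lattice_sizes=[2, 2], units=1, edgeworth_trusts=[(0, 1, 1)])
#   # projected == [[0.0, 0.0], [1.0, 1.0]]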
# TODO: It is likely that this algorithm will work for all trapezoid
# trust constraints without needing the reduce_max, as long as there are no
# edgeworth constraints. If true, consider using that approach when possible.
def _approximately_project_trapezoid(weights, lattice_sizes, units,
trapezoid_trusts, edgeworth_trusts):
"""Approximately projects to strictly meet all trapezoid trust constraints.
Note that this function will not introduce violations to any
previously-satisfied monotonicity or edgeworth constraints.
Algorithm details:
For a constraint on main dimension i and conditional dimension j, consider
some slice of weights that is fixed along all other dimensions, leaving a grid
```
0---1---2---3
| | | |
4---5---6---7
| | | |
8---9---10--11
```
You can think of all the other dimensions as other such grids stacked behind
this one, e.g. weight[8] and the points behind it are all such points with
index 0 in the i'th and j'th dimensions, and weight[6] and the points behind
it are all such points with index 2 in the i'th dimension and index 1 in the
j'th.
We project to trapezoid trust on this grid by working up both edges of
the lattice and only ever decreasing weights on the low main_feature side and
increasing weights on the high main_feature side. In the above example, we
would first consider the pair {8, 4} and update weight 4 to be min(8, 4),
before then looking at {4, 0} and updating 0 to be min(4, 0). Similarly set
weight 7 to be max(7, 11) and then weight 3 to max(3, 7). Flip the orders if
cond_direction is -1: work down instead of up.
Unlike in the edgeworth trust case, we do not necessarily look 'behind' the
page and update all points behind a given grid point by the maximum violation
at each step. It turns out that while this does have the nice property of
maintaining almost all types of edgeworth constraints, for the same reason
that the edgeworth algorithm does (co-movement of weights involved in other
constraints), it can actually break other trapezoid constraints, namely those
which share the same conditional feature.
There is one exception, which is the matching edgeworth trust constraint. In
this case, the trapezoid updates only touch one corner of each edgeworth
constraint and so can violate them. The solution is to update by the max of
all violations behind the page and all violations encountered below in the
grid.
If you separately update each grid by the violations in that grid, this update
procedure turns out to respect all trapezoid constraints. The rationale is a
bit more subtle than in the edgeworth case. The basic idea is that since each
trapezoid and monotonicity constraint operates on two weights that are next to
each other (i.e. differ only in the index of one dimension), we can create
a 'square' of points in which one edge goes across the constraint we want to
maintain and the perpendicular edges go across the constraint we are updating.
For example, consider the 4 weights
```
A -- B
| |
C -- D
```
A/B and C/D differ in the same one index (the constraint we hope to maintain)
while A/C and B/D differ across the conditional index of the trapezoid
constraint we are updating. Say we are focused on whether we maintain A'<=B'
(A' is A after imposing trapezoid trust) and we are operating on the 'min main
feature' side of the lattice so that any updates that occur will lower
weights. If B'=B after trapezoid trust, things are easy because A'<=A by 'min
main feature' and A<=B by the preexisting constraint. If not, and B'<B, we
start with A'<=C' by trapezoid trust and C'<=C by 'min main feature'. By
the preexisting constraints, C<=D, and by the trapezoid trust update procedure
and the fact that B has changed, it must be that B'=D.
Unfortunately, this algorithm will break edgeworth constraints.
The solution we take is to update independently for each grid whenever we have
only trapezoid constraints and to update with the max across all other
dimensions (and potentially below, in the case of matching constraints)
when there are both types of constraints, recognizing that in this second case
we may not achieve guarantees for trapezoid constraints which share a
conditional feature.
Differs from _project_partial_trapezoid in that this algorithm guarantees a
global satisfying solution for all trapezoid trust constraints.
Args:
weights: Tensor with weights whose shape matches lattice_sizes plus units
if units > 1.
lattice_sizes: List or tuple of integers which represents the lattice sizes
which correspond to weights.
units: Output dimension of the lattice.
trapezoid_trusts: None or iterable of three-element tuples. First element is
the index of the main (monotonic) feature. Second element is the index of
the conditional feature. Third element is the direction of trust set to 1
if higher values of the conditional feature should increase trust in the
main feature and -1 otherwise.
edgeworth_trusts: None or iterable of three-element tuples. First element is
the index of the main (monotonic) feature. Second element is the index of
the conditional feature. Third element is the direction of trust set to 1
if higher values of the conditional feature should increase trust in the
main feature and -1 otherwise.
Returns:
Tensor with projected weights matching shape of input weights.
"""
any_edgeworth = bool(edgeworth_trusts)
# Project onto trust constraints by cumulatively fixing violations.
for main_dim, cond_dim, cond_direction in trapezoid_trusts or []:
layers = _unstack_nd(weights, [main_dim, cond_dim])
max_main_dim = lattice_sizes[main_dim] - 1
same_edgeworth = (main_dim, cond_dim,
cond_direction) in set(edgeworth_trusts or [])
if cond_direction < 0:
layers = _reverse_second_list_dimension(layers)
lhs_update, rhs_update = 0, 0
for j in range(0, lattice_sizes[cond_dim] - 1):
lhs_difference = layers[0][j + 1] - layers[0][j]
lhs_update = _trapezoid_violation_update(lhs_difference, units,
any_edgeworth, same_edgeworth,
lhs_update)
layers[0][j + 1] -= lhs_update
rhs_difference = layers[max_main_dim][j] - layers[max_main_dim][j + 1]
rhs_update = _trapezoid_violation_update(rhs_difference, units,
any_edgeworth, same_edgeworth,
rhs_update)
layers[max_main_dim][j + 1] += rhs_update
if cond_direction < 0:
layers = _reverse_second_list_dimension(layers)
weights = _stack_nd(layers, [main_dim, cond_dim])
return weights
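# Illustrative usage sketch (not part of the library; values are made up).
# For a 2x2 lattice with a single trapezoid constraint (main dim 0, cond
# dim 1, direction 1) and no edgeworth constraints, the projection lowers
# weights along the min-main edge and raises them along the max-main edge:
#
#   w = tf.constant([[0.0, 0.3],
#                    [1.0, 0.8]])   # w[main][cond]
#   projected = _approximately_project_trapezoid(
#       w, lattice_sizes=[2, 2], units=1,
#       trapezoid_trusts=[(0, 1, 1)], edgeworth_trusts=None)
#   # projected == [[0.0, 0.0], [1.0, 1.0]]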
def _trapezoid_violation_update(differences, units, any_edgeworth,
same_edgeworth, prior_update):
"""Calculates update amount based on violations for trapezoid projection.
Note that the shape of the returned tensor is different based on the value
of the any_edgeworth boolean feature. A single-valued tensor is
returned when it is true, representing the amount by which all relevant
weights will be updated. A tensor matching the shape of differences is
returned when it is false, representing the individual updates to be applied
to each relevant weight.
Args:
differences: Tensor containing amounts by which constraints are satisfied or
violated.
units: Output dimension of the lattice.
any_edgeworth: Boolean for whether any edgeworth trust constraints are set
for this lattice layer.
same_edgeworth: Boolean for whether there is a matching edgeworth constraint
for the trapezoid constraint being updated.
prior_update: Tensor containing previous trapezoid constraint update.
Returns:
Tensor either matching the shape of the input differences tensor or
consisting of a single element.
"""
dims = len(differences.shape) - 1
axis = tf.constant(list(range(dims)), dtype=tf.int32) if units > 1 else None
if any_edgeworth and same_edgeworth:
return tf.maximum(tf.maximum(
tf.reduce_max(differences, axis=axis), 0), prior_update)
elif any_edgeworth:
return tf.maximum(tf.reduce_max(differences, axis=axis), 0)
else:
return tf.maximum(differences, 0)
def _approximately_project_bounds(weights, units, output_min, output_max):
"""Approximately projects to strictly meet min/max constraints.
Note that this function will not introduce violations to any
previously-satisfied monotonicity or trust constraints.
Algorithm details:
The idea of the min/max projection is to evenly scale (squish) the weights
to fit within the desired range. This ensures that the weight differences-of-
differences encountered in the trust constraints will not be affected.
For example, given min_weight < output_min < 0 < output_max < max_weight, we
will translate all weights such that min_weight = 0, then scale the weights
by the difference in ratios between max_weight - min_weight and output_max -