# binary_backend.ex
defmodule Nx.BinaryBackend do
@moduledoc """
An opaque backend written in pure Elixir that stores
the data in Elixir's binaries.
This is the default backend used by the `Nx` module.
The backend itself (and its data) is private and must
not be accessed directly.
"""
use Complex.Kernel
@behaviour Nx.Backend
@doc false
defstruct [:state]
alias Nx.Tensor, as: T
alias Nx.BinaryBackend, as: B
import Nx.Shared
import Bitwise, only: [>>>: 2, &&&: 2]
@impl true
def init(opts) do
if opts != [] do
raise ArgumentError, "Nx.BinaryBackend accepts no options"
end
opts
end
## Creation
@impl true
def constant(%{type: type, shape: shape} = out, constant, _backend_options) do
data = :binary.copy(number_to_binary(constant, type), Nx.size(shape))
from_binary(out, data)
end
@impl true
def iota(%{shape: {}, type: type} = out, nil, _backend_options) do
from_binary(out, number_to_binary(0, type))
end
def iota(%{shape: shape, type: type} = out, nil, backend_options) do
t = iota(%T{type: type, shape: {Nx.size(shape)}, names: [nil]}, 0, backend_options)
%{out | data: t.data}
end
def iota(%{shape: {n}, type: type} = out, 0, _backend_options) do
data = for i <- 0..(n - 1), do: number_to_binary(i, type)
from_binary(out, data)
end
def iota(%{shape: shape, type: type} = out, axis, _backend_options) do
{dims_before, [dim | dims_after]} =
shape
|> Tuple.to_list()
|> Enum.split(axis)
# Number of repetitions of an index in memory
repeat_blocks =
dims_after
|> Enum.reduce(1, &*/2)
# Number of cycles of the counting pattern
cycles =
dims_before
|> Enum.reduce(1, &*/2)
data =
for _ <- 1..cycles,
i <- 0..(dim - 1),
_ <- 1..repeat_blocks,
into: "",
do: number_to_binary(i, type)
from_binary(out, data)
end
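# Worked example (illustrative only): for shape {2, 3} and axis 1,
# dims_before = [2] and dims_after = [], so cycles = 2 and repeat_blocks = 1.
# The comprehension emits 0, 1, 2, 0, 1, 2, which is the row-major layout of
# Nx.iota({2, 3}, axis: 1).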
@impl true
def eye(%{shape: shape, type: type} = out, _backend_options) do
one = number_to_binary(1, type)
zero = number_to_binary(0, type)
shape_size = tuple_size(shape)
m = elem(shape, shape_size - 2)
n = elem(shape, shape_size - 1)
count =
shape
|> Tuple.delete_at(shape_size - 1)
|> Tuple.delete_at(shape_size - 2)
|> Tuple.product()
data =
for _ <- 1..count, i <- 1..m, j <- 1..n, into: <<>> do
if i == j, do: one, else: zero
end
from_binary(out, data)
end
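# Illustrative sketch: for shape {2, 2} the comprehension emits the values
# 1, 0, 0, 1 (a 2x2 identity); for a batched shape such as {3, 2, 2},
# count = 3 and three identical identity faces are written back to back.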
## Conversions
@impl true
def from_binary(t, binary, _backend_options), do: from_binary(t, binary)
if Application.compile_env(:nx, :verify_binary_size) do
defp from_binary(%{type: {_, bitsize}, shape: shape} = t, binary) when is_binary(binary) do
actual = byte_size(binary)
expected = Tuple.product(shape) * div(bitsize, 8)
unless actual == expected do
raise ArgumentError,
"unexpected size for tensor data, expected #{expected} bytes got: #{actual} bytes"
end
%{t | data: %B{state: binary}}
end
else
defp from_binary(t, binary) when is_binary(binary), do: %{t | data: %B{state: binary}}
end
defp from_binary(t, other), do: from_binary(t, IO.iodata_to_binary(other))
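# Size-check sketch (only compiled when :verify_binary_size is set): a tensor
# of shape {2, 3} and type {:f, 32} must be backed by 2 * 3 * div(32, 8) = 24
# bytes, otherwise from_binary/2 raises ArgumentError.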
@impl true
def to_binary(%{type: {_, size}} = t, limit) do
limit = limit * div(size, 8)
binary = to_binary(t)
if byte_size(binary) == limit do
binary
else
binary_part(binary, 0, limit)
end
end
defp to_binary(%T{data: %{state: data}}), do: data
@impl true
def backend_copy(tensor, backend, opts) do
backend_transfer(tensor, backend, opts)
end
@impl true
def backend_transfer(tensor, Nx.Tensor, _opts) do
tensor
end
def backend_transfer(tensor, Nx.BinaryBackend, _opts) do
tensor
end
def backend_transfer(tensor, backend, opts) do
backend.from_binary(tensor, to_binary(tensor), opts)
end
@impl true
def backend_deallocate(_tensor) do
:ok
end
@impl true
def to_batched(out, %{type: {_, size}} = tensor, opts) do
leftover = opts[:leftover]
batch_size = elem(out.shape, 0)
axis_size = elem(tensor.shape, 0)
remainder = rem(axis_size, batch_size)
num_full_batches = div(axis_size, batch_size)
range =
if remainder != 0 and leftover == :repeat do
0..num_full_batches
else
0..(num_full_batches - 1)
end
binary = to_binary(tensor)
batch_bytes = Nx.size(out) * div(size, 8)
Stream.map(range, fn
^num_full_batches ->
before = num_full_batches * batch_bytes
available = byte_size(binary) - before
missing = batch_bytes - available
from_binary(out, [binary_part(binary, before, available), binary_part(binary, 0, missing)])
i ->
from_binary(out, binary_part(binary, i * batch_bytes, batch_bytes))
end)
end
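# Behaviour sketch: for an axis of size 5 and batch size 2, num_full_batches
# is 2 and remainder is 1. With leftover: :repeat a third batch is emitted
# whose missing tail wraps around to the start of the binary, so it holds the
# fifth element followed by the first one again.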
## Shape
@impl true
def reshape(out, tensor), do: from_binary(out, to_binary(tensor))
@impl true
def squeeze(out, tensor, _axes), do: from_binary(out, to_binary(tensor))
## Broadcast
@impl true
def broadcast(out, t, shape, axes) do
from_binary(out, broadcast_data(t, shape, axes))
end
defp broadcast_data(%{shape: shape} = t, shape),
do: to_binary(t)
defp broadcast_data(t, shape),
do: broadcast_data(t, shape, Nx.Shape.broadcast_axes(t.shape, shape))
defp broadcast_data(%T{shape: {}} = t, shape, []) do
t
|> to_binary()
|> :binary.copy(Nx.size(shape))
end
defp broadcast_data(%T{shape: old_shape, type: {_, size}} = t, new_shape, axes) do
chunk_size = size * Nx.size(old_shape)
new_shape
|> Tuple.to_list()
|> unary_broadcast(0, old_shape, 0, axes, to_binary(t), chunk_size)
|> IO.iodata_to_binary()
end
# Old and new match
defp unary_broadcast([dim | dims], axis, old_shape, old_pos, [axis | axes], data, chunk_size)
when elem(old_shape, old_pos) == dim do
chunk_size = div(chunk_size, dim)
for <<chunk::size(chunk_size)-bitstring <- data>> do
unary_broadcast(dims, axis + 1, old_shape, old_pos + 1, axes, chunk, chunk_size)
end
end
# Implicit broadcasting
defp unary_broadcast([dim | dims], axis, old_shape, old_pos, [axis | axes], data, chunk_size)
when elem(old_shape, old_pos) == 1 do
for _ <- 1..dim do
unary_broadcast(dims, axis + 1, old_shape, old_pos + 1, axes, data, chunk_size)
end
end
# Explicit broadcasting (unmapped axes)
defp unary_broadcast([dim | dims], axis, old_shape, old_pos, axes, data, chunk_size) do
for _ <- 1..dim do
unary_broadcast(dims, axis + 1, old_shape, old_pos, axes, data, chunk_size)
end
end
defp unary_broadcast([], _axis, _old_shape, _old_pos, [], data, _chunk_size) do
data
end
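# Layout sketch: broadcasting a {1, 3} tensor to {2, 3} hits the "implicit
# broadcasting" clause on axis 0, so its 3-element binary is emitted twice;
# broadcasting a {3} tensor to {2, 3} (axes [1]) takes the "unmapped axes"
# clause with the same duplicated result.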
## Shape
@impl true
def transpose(out, %T{shape: shape, type: {_, size}} = t, axes) do
data = to_binary(t)
{list, min, max} = transpose_axes(shape, axes)
weighted_shape = weighted_shape(shape, size)
# The chunk size covers all dimensions from the minimum axis being
# transposed to the end. For example, for axes {0, 1, 2, 3} with the swap
# between 1 and 2, chunk_size will be d1 * d2 * d3 * size.
chunk_size = weighted_chunk(weighted_shape, min, size)
# All of the trailing dimensions after the last transposed axis can be
# read at once. For example, for axes {0, 1, 2, 3} with the swap between
# 1 and 2, read_size will be d3 * size.
read_size = weighted_chunk(weighted_shape, max + 1, size)
# And now how we will traverse
traverse_list = Enum.map(list, &Enum.fetch!(weighted_shape, &1))
data =
for <<chunk::size(chunk_size)-bitstring <- data>> do
weighted_traverse(traverse_list, chunk, read_size)
end
from_binary(out, data)
end
defp transpose_axes(shape, axes) do
size = tuple_size(shape)
{axes, min} = transpose_min(axes, 0)
{axes, max} = transpose_max(Enum.reverse(axes), size - 1)
{axes, min, max}
end
defp transpose_min([head | tail], head), do: transpose_min(tail, head + 1)
defp transpose_min(tail, head), do: {tail, head}
defp transpose_max([head | tail], head), do: transpose_max(tail, head - 1)
defp transpose_max(tail, head), do: {Enum.reverse(tail), head}
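# Example: for axes [0, 2, 1, 3] (swapping axes 1 and 2), transpose_min/2
# drops the already-in-place leading 0 and transpose_max/2 drops the trailing
# 3, so transpose_axes/2 returns {[2, 1], 1, 2} and only the middle axes are
# traversed out of order.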
## Pad
# We ignore the out because we need to recur over the shape
# as we transpose and build the rest.
@impl true
def pad(out, t, pad_value, padding_config) do
pad_value = %{pad_value | type: out.type} |> as_type(pad_value) |> to_binary()
case t.shape do
{} ->
t
{_} ->
[{edge_low, edge_high, interior}] = padding_config
pad_last_dim(t, pad_value, edge_low, edge_high, interior)
_ ->
permutation = for i <- 0..(Nx.rank(t) - 2), do: i
permutation = [Nx.rank(t) - 1 | permutation]
for {edge_low, edge_high, interior} <- Enum.reverse(padding_config), reduce: t do
acc ->
Nx.transpose(pad_last_dim(acc, pad_value, edge_low, edge_high, interior),
axes: permutation
)
end
end
end
# Add padding to the high and low ends of the last dimension of a tensor
defp pad_last_dim(
%T{shape: shape, type: {_, size} = type} = t,
value,
edge_low,
edge_high,
interior
) do
view = aggregate_axes(to_binary(t), [tuple_size(shape) - 1], shape, size)
new_shape = pad_in_dim(shape, tuple_size(shape) - 1, edge_low, edge_high, interior)
edge_high_padding =
if edge_high <= 0,
do: <<>>,
else: for(_ <- 1..edge_high, into: <<>>, do: value)
edge_low_padding =
if edge_low <= 0,
do: <<>>,
else: for(_ <- 1..edge_low, into: <<>>, do: value)
interior_padding =
if interior == 0,
do: <<>>,
else: for(_ <- 1..interior, into: <<>>, do: value)
interior_padding_size = interior * size
interior_padded =
for bin <- view do
padded =
for <<dim::size(size)-bitstring <- bin>>, into: <<>> do
<<dim::size(size)-bitstring, interior_padding::bitstring>>
end
new_bytes = byte_size(padded) * 8 - interior_padding_size
<<new_bin::size(new_bytes)-bitstring, _::bitstring>> = padded
new_bin
end
data =
for bin <- interior_padded, into: <<>> do
cond do
edge_low < 0 and edge_high < 0 ->
low_byte = abs(edge_low) * size
high_byte = abs(edge_high) * size
new_bytes = byte_size(bin) * 8 - high_byte - low_byte
<<_::size(low_byte)-bitstring, new_bin::size(new_bytes)-bitstring, _::bitstring>> =
bin
new_bin
edge_low < 0 and edge_high >= 0 ->
low_byte = abs(edge_low) * size
<<_::size(low_byte)-bitstring, new_bin::bitstring>> = bin
<<new_bin::bitstring, edge_high_padding::bitstring>>
edge_low >= 0 and edge_high < 0 ->
high_byte = abs(edge_high) * size
new_bytes = byte_size(bin) * 8 - high_byte
<<new_bin::size(new_bytes)-bitstring, _::bitstring>> = bin
<<edge_low_padding::bitstring, new_bin::bitstring>>
true ->
<<edge_low_padding::bitstring, bin::bitstring, edge_high_padding::bitstring>>
end
end
from_binary(%{t | type: type, shape: new_shape}, data)
end
defp pad_in_dim(shape, dim, edge_low, edge_high, interior) do
dim_size = elem(shape, dim)
interior_padding_factor = (dim_size - 1) * interior
new_dim = dim_size + interior_padding_factor + edge_high + edge_low
put_elem(shape, dim, new_dim)
end
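# Worked example: pad_in_dim({4}, 0, 1, 2, 1) gives
# 4 + (4 - 1) * 1 + 2 + 1 = 10, i.e. one interior value between each of the
# four original entries plus one low-edge and two high-edge values.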
@impl true
def reverse(out, %{type: {_, size}, shape: shape} = t, axes) do
data = to_binary(t)
weighted_shape = weighted_shape(shape, size)
# Nx guarantees axes is sorted and non-empty.
min = List.first(axes)
max = List.last(axes) + 1
# The chunk size covers all dimensions from the minimum axis being
# reversed to the end. For example, for axes {0, 1, 2, 3} with the
# reversal over axes 1 and 2, chunk_size will be d1 * d2 * d3 * size.
chunk_size = weighted_chunk(weighted_shape, min, size)
# All of the trailing dimensions after the last reversed axis can be
# read at once. For example, for axes {0, 1, 2, 3} with the reversal
# over axes 1 and 2, read_size will be d3 * size.
read_size = weighted_chunk(weighted_shape, max, size)
# And now how we will traverse
traverse =
weighted_shape
|> Enum.take(max)
|> Enum.drop(min)
|> reverse_traverse(min, axes)
data =
for <<chunk::size(chunk_size)-bitstring <- data>> do
weighted_traverse(traverse, chunk, read_size)
end
from_binary(out, data)
end
defp reverse_traverse([head | tail], axis, axes) do
if axis in axes do
[&Enum.reverse/1, head | reverse_traverse(tail, axis + 1, axes)]
else
[head | reverse_traverse(tail, axis + 1, axes)]
end
end
defp reverse_traverse([], _axis, _axes), do: []
## Two-element
@impl true
def dot(out, left, contract_axes1, [], right, contract_axes2, []) do
# dot/4 is directed to this specific clause so we can keep a more efficient implementation
# for non-batched dot products. See the clause below for batched dot products
data = bin_dot(left, contract_axes1, right, contract_axes2, out.type)
from_binary(out, data)
end
def dot(
out,
%{shape: left_shape, type: {_, left_size}, names: left_names} = left,
left_contract_axes,
left_batch_axes,
%{shape: right_shape, type: {_, right_size}, names: right_names} = right,
right_contract_axes,
right_batch_axes
) do
left_binary = to_binary(left)
right_binary = to_binary(right)
left_batch_contract_axes =
Enum.map(left_contract_axes, fn axis -> axis - length(left_batch_axes) end)
right_batch_contract_axes =
Enum.map(right_contract_axes, fn axis -> axis - length(right_batch_axes) end)
{left_batch_shape, _left_batch_names} =
Nx.Shape.contract(left_shape, left_batch_axes, left_names, false)
{right_batch_shape, _right_batch_names} =
Nx.Shape.contract(right_shape, right_batch_axes, right_names, false)
left_batch_item_length = Nx.size(left_batch_shape)
right_batch_item_length = Nx.size(right_batch_shape)
batch_count = Enum.reduce(left_batch_axes, 1, fn x, acc -> elem(left_shape, x) * acc end)
range = if batch_count == 0, do: [], else: 0..(batch_count - 1)
left_batch_item_template = %{left | shape: left_batch_shape}
right_batch_item_template = %{right | shape: right_batch_shape}
bin_result =
for index <- range do
left_offset = index * left_batch_item_length
right_offset = index * right_batch_item_length
left_offset_bits = left_offset * left_size
right_offset_bits = right_offset * right_size
left_batch_item_bits = left_batch_item_length * left_size
right_batch_item_bits = right_batch_item_length * right_size
<<_::bitstring-size(left_offset_bits),
left_batch_item_binary::bitstring-size(left_batch_item_bits),
_::bitstring>> = left_binary
<<_::bitstring-size(right_offset_bits),
right_batch_item_binary::bitstring-size(right_batch_item_bits),
_::bitstring>> = right_binary
bin_dot(
from_binary(left_batch_item_template, left_batch_item_binary),
left_batch_contract_axes,
from_binary(right_batch_item_template, right_batch_item_binary),
right_batch_contract_axes,
out.type
)
end
from_binary(out, bin_result)
end
defp bin_dot(%{type: t1} = left, contract_axes1, %{type: t2} = right, contract_axes2, type) do
{left, left_contract_axes} = bin_dot_transpose_contract_axes(left, contract_axes1)
{right, right_contract_axes} = bin_dot_transpose_contract_axes(right, contract_axes2)
bin_zip_reduce(left, left_contract_axes, right, right_contract_axes, type, 0, fn
lhs, rhs, acc ->
res = binary_to_number(lhs, t1) * binary_to_number(rhs, t2) + acc
{res, res}
end)
end
defp bin_dot_transpose_contract_axes(tensor, contract_axes) do
# The intuition here is that we can pre-condense the contracting axes into a
# single dimension, which will then be contracted through bin_zip_reduce below.
# This takes a shape {a, m, n, b} which contracts on m, n and turns it into
# {a, b, m * n}, contracting on the last dimension. This is necessary because
# bin_zip_reduce and aggregate_axes are order independent but dot depends
# on the axes order.
axes = Nx.axes(tensor)
remaining_axes =
contract_axes
|> Enum.sort(:desc)
|> Enum.reduce(axes, &List.delete_at(&2, &1))
transpose_axes = remaining_axes ++ contract_axes
transposed =
if transpose_axes == axes do
tensor
else
{shape, names} = Nx.Shape.transpose(tensor.shape, transpose_axes, tensor.names)
transpose(%{tensor | shape: shape, names: names}, tensor, transpose_axes)
end
{kept, contracted} =
transposed.shape
|> Tuple.to_list()
|> Enum.split(length(remaining_axes))
kept_shape = List.to_tuple(kept)
kept_size = tuple_size(kept_shape)
reduced_shape = Tuple.insert_at(kept_shape, kept_size, Enum.product(contracted))
{%{transposed | shape: reduced_shape, names: List.duplicate(nil, tuple_size(reduced_shape))},
[kept_size]}
end
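# Example of the condensation above: contracting a {a, m, n, b} tensor over
# axes [1, 2] leaves remaining_axes = [0, 3], so transpose_axes = [0, 3, 1, 2]
# and the result is reshaped to {a, b, m * n}; bin_zip_reduce then contracts
# only the single trailing axis (returned here as [kept_size] = [2]).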
## Element wise ternary ops
@impl true
def select(out, %{shape: {}} = pred, on_true, on_false) do
result =
if scalar_to_number(pred) == 0 do
on_false |> broadcast_data(out.shape) |> binary_to_binary(on_false.type, out.type, & &1)
else
on_true |> broadcast_data(out.shape) |> binary_to_binary(on_true.type, out.type, & &1)
end
from_binary(out, result)
end
def select(%{shape: shape, type: type} = out, pred, on_true, on_false) do
%T{type: {_, pred_size} = pred_type} = pred
%T{type: {_, left_size} = left_type} = on_true
%T{type: {_, right_size} = right_type} = on_false
pred_data = to_binary(pred)
on_true_data = broadcast_data(on_true, shape)
on_false_data = broadcast_data(on_false, shape)
data =
for i <- 0..(Nx.size(shape) - 1), into: <<>> do
pred =
match_types [pred_type] do
consumed = i * pred_size
<<_::size(consumed)-bitstring, match!(pred, 0), _::bitstring>> = pred_data
read!(pred, 0)
end
result =
if pred == 0 do
match_types [right_type] do
consumed = i * right_size
<<_::size(consumed)-bitstring, match!(x, 0), _::bitstring>> = on_false_data
read!(x, 0)
end
else
match_types [left_type] do
consumed = i * left_size
<<_::size(consumed)-bitstring, match!(x, 0), _::bitstring>> = on_true_data
read!(x, 0)
end
end
number_to_binary(result, type)
end
from_binary(out, data)
end
## Element wise bin ops
for fun <-
[:add, :subtract, :multiply, :pow, :remainder, :divide, :atan2, :min, :max, :quotient] ++
[:bitwise_and, :bitwise_or, :bitwise_xor, :left_shift, :right_shift] ++
[:equal, :not_equal, :greater, :less, :greater_equal, :less_equal] ++
[:logical_and, :logical_or, :logical_xor] do
capture = Macro.var(:"element_#{fun}", __MODULE__)
@impl true
def unquote(fun)(out, left, right) do
element_wise_bin_op(out, left, right, &(unquote(capture) / 3))
end
end
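# Each pass of the comprehension above expands to a clause such as
#
#     def add(out, left, right),
#       do: element_wise_bin_op(out, left, right, &element_add/3)
#
# so every public binary op dispatches to the element_* helpers below.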
defp element_wise_bin_op(%{type: type} = out, %{shape: {}} = left, right, fun) do
number = scalar_to_number(left)
data =
binary_to_binary(to_binary(right), right.type, type, fn x ->
fun.(type, number, x)
end)
from_binary(out, data)
end
defp element_wise_bin_op(%{type: type} = out, left, %{shape: {}} = right, fun) do
number = scalar_to_number(right)
data =
binary_to_binary(to_binary(left), left.type, type, fn x ->
fun.(type, x, number)
end)
from_binary(out, data)
end
defp element_wise_bin_op(%{shape: shape, type: type} = out, left, right, fun) do
%T{type: {_, left_size} = left_type} = left
%T{type: {_, right_size} = right_type} = right
count = Nx.size(shape)
left_data = broadcast_data(left, shape)
right_data = broadcast_data(right, shape)
data =
match_types [left_type, right_type, type] do
for i <- 0..(count - 1), into: <<>> do
left_consumed = i * left_size
<<_::size(left_consumed)-bitstring, match!(x, 0), _::bitstring>> = left_data
x = read!(x, 0)
right_consumed = i * right_size
<<_::size(right_consumed)-bitstring, match!(y, 1), _::bitstring>> = right_data
y = read!(y, 1)
<<write!(fun.(type, x, y), 2)>>
end
end
from_binary(out, data)
end
defp element_add(_, a, b), do: Complex.add(a, b)
defp element_subtract(_, a, b), do: Complex.subtract(a, b)
defp element_multiply(_, a, b), do: Complex.multiply(a, b)
defp element_divide(_, a, b), do: Complex.divide(a, b)
defp element_quotient(_, a, b), do: div(a, b)
defp element_remainder(_, a, b) when is_integer(a) and is_integer(b), do: rem(a, b)
defp element_remainder(_, a, b), do: :math.fmod(a, b)
defp element_atan2(_, a, b), do: Complex.atan2(a, b)
defp element_max(_, :nan, _), do: :nan
defp element_max(_, _, :nan), do: :nan
defp element_max(_, :infinity, _), do: :infinity
defp element_max(_, _, :infinity), do: :infinity
defp element_max(_, :neg_infinity, x), do: x
defp element_max(_, x, :neg_infinity), do: x
defp element_max(_, a, b) when is_number(a) and is_number(b), do: max(a, b)
defp element_min(_, :nan, _), do: :nan
defp element_min(_, _, :nan), do: :nan
defp element_min(_, :infinity, x), do: x
defp element_min(_, x, :infinity), do: x
defp element_min(_, :neg_infinity, _), do: :neg_infinity
defp element_min(_, _, :neg_infinity), do: :neg_infinity
defp element_min(_, a, b) when is_number(a) and is_number(b), do: min(a, b)
defp element_pow({type, _}, a, b) when type in [:s, :u], do: Integer.pow(a, b)
defp element_pow(_, a, b), do: Complex.pow(a, b)
defp element_bitwise_and(_, a, b), do: :erlang.band(a, b)
defp element_bitwise_or(_, a, b), do: :erlang.bor(a, b)
defp element_bitwise_xor(_, a, b), do: :erlang.bxor(a, b)
defp element_left_shift(_, a, b) when is_number(b) and b >= 0,
do: :erlang.bsl(a, b)
defp element_left_shift(_, _, b), do: raise(ArgumentError, "cannot left shift by #{b}")
defp element_right_shift(_, a, b) when is_number(b) and b >= 0,
do: :erlang.bsr(a, b)
defp element_right_shift(_, _, b), do: raise(ArgumentError, "cannot right shift by #{b}")
defp element_equal(_, :nan, _), do: 0
defp element_equal(_, _, :nan), do: 0
defp element_equal(_, a, b), do: boolean_as_number(a == b)
defp element_not_equal(_, :nan, _), do: 1
defp element_not_equal(_, _, :nan), do: 1
defp element_not_equal(_, a, b), do: boolean_as_number(a != b)
defp element_logical_and(_, a, b), do: boolean_as_number(as_boolean(a) and as_boolean(b))
defp element_logical_or(_, a, b), do: boolean_as_number(as_boolean(a) or as_boolean(b))
defp element_logical_xor(_, a, b), do: boolean_as_number(as_boolean(a) != as_boolean(b))
defp element_greater(_, :nan, _), do: 0
defp element_greater(_, _, :nan), do: 0
defp element_greater(_, x, x), do: 0
defp element_greater(_, :infinity, _), do: 1
defp element_greater(_, _, :neg_infinity), do: 1
defp element_greater(_, :neg_infinity, _), do: 0
defp element_greater(_, _, :infinity), do: 0
defp element_greater(_, a, b), do: boolean_as_number(a > b)
defp element_less(_, :nan, _), do: 0
defp element_less(_, _, :nan), do: 0
defp element_less(_, :infinity, _), do: 0
defp element_less(_, _, :neg_infinity), do: 0
defp element_less(_, x, x), do: 0
defp element_less(_, _, :infinity), do: 1
defp element_less(_, :neg_infinity, _), do: 1
defp element_less(_, a, b), do: boolean_as_number(a < b)
defp element_greater_equal(_, :nan, _), do: 0
defp element_greater_equal(_, _, :nan), do: 0
defp element_greater_equal(_, x, x), do: 1
defp element_greater_equal(_, :neg_infinity, _), do: 0
defp element_greater_equal(_, _, :infinity), do: 0
defp element_greater_equal(_, :infinity, _), do: 1
defp element_greater_equal(_, _, :neg_infinity), do: 1
defp element_greater_equal(_, a, b), do: boolean_as_number(a >= b)
defp element_less_equal(_, :nan, _), do: 0
defp element_less_equal(_, _, :nan), do: 0
defp element_less_equal(_, _, :infinity), do: 1
defp element_less_equal(_, :neg_infinity, _), do: 1
defp element_less_equal(_, x, x), do: 1
defp element_less_equal(_, :infinity, _), do: 0
defp element_less_equal(_, _, :neg_infinity), do: 0
defp element_less_equal(_, a, b), do: boolean_as_number(a <= b)
defp as_boolean(n) when n == 0, do: false
defp as_boolean(%Complex{re: re, im: im}) when re == 0 and im == 0, do: false
defp as_boolean(_), do: true
defp boolean_as_number(true), do: 1
defp boolean_as_number(false), do: 0
## Element wise unary ops
for {name, {_desc, code, _formula}} <- Nx.Shared.unary_math_funs() do
@impl true
def unquote(name)(out, tensor) do
element_wise_unary_op(out, tensor, fn x -> unquote(code) end)
end
end
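# Similarly, the comprehension above turns every entry of
# Nx.Shared.unary_math_funs/0 (for example :exp) into a public clause like
# def exp(out, tensor) that applies the quoted `code` expression element by
# element through element_wise_unary_op/3.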
@impl true
def count_leading_zeros(out, %{type: {_, size}} = tensor) do
element_wise_bit_op(out, tensor, &element_clz(&1, size))
end
@impl true
def population_count(out, tensor) do
element_wise_bit_op(out, tensor, &element_popcount(&1, 0))
end
defp element_wise_bit_op(out, %{type: {_, size}} = tensor, fun) do
data =
match_types [out.type] do
for <<seg::unsigned-size(size)-native <- to_binary(tensor)>>, into: <<>> do
<<write!(fun.(seg), 0)>>
end
end
from_binary(out, data)
end
@impl true
def abs(out, tensor), do: element_wise_unary_op(out, tensor, &Complex.abs/1)
@impl true
def conjugate(out, tensor), do: element_wise_unary_op(out, tensor, &conjugate_fun/1)
defp conjugate_fun(n) when is_number(n), do: Complex.new(n, -0.0)
defp conjugate_fun(z), do: Complex.conjugate(z)
@impl true
def real(%{type: {_, component_size}} = out, %{type: {:c, _}} = tensor) do
data = to_binary(tensor)
result =
for <<real::bitstring-size(component_size), _::bitstring-size(component_size) <- data>>,
into: <<>>,
do: real
from_binary(out, result)
end
@impl true
def imag(%{type: {_, component_size}} = out, %{type: {:c, _}} = tensor) do
data = to_binary(tensor)
result =
for <<_::bitstring-size(component_size), imag::bitstring-size(component_size) <- data>>,
into: <<>>,
do: imag
from_binary(out, result)
end
@impl true
def bitwise_not(out, tensor), do: element_wise_unary_op(out, tensor, &:erlang.bnot/1)
@impl true
def is_nan(out, %{type: {t, _}}) when t in [:u, :s] do
# integers cannot represent nans, so we can just create
# a zero boolean tensor
# 8 bits per entry because we return u8
size = Nx.size(out.shape) * 8
from_binary(out, <<0::size(size)>>)
end
def is_nan(out, tensor) do
element_wise_unary_op(out, tensor, fn
%Complex{re: :nan} -> 1
%Complex{im: :nan} -> 1
:nan -> 1
_ -> 0
end)
end
@impl true
def is_infinity(out, %{type: {t, _}}) when t in [:u, :s] do
# integers cannot represent infinities, so we can just create
# a zero boolean tensor
# 8 bits per entry because we return u8
size = Nx.size(out.shape) * 8
from_binary(out, <<0::size(size)>>)
end
def is_infinity(out, tensor) do
element_wise_unary_op(out, tensor, fn
%Complex{re: re} when re in [:infinity, :neg_infinity] -> 1
%Complex{im: im} when im in [:infinity, :neg_infinity] -> 1
:infinity -> 1
:neg_infinity -> 1
_ -> 0
end)
end
@impl true
def ceil(out, tensor), do: element_wise_unary_op(out, tensor, &:math.ceil/1)
@impl true
def floor(out, tensor), do: element_wise_unary_op(out, tensor, &:math.floor/1)
@impl true
def negate(out, tensor), do: element_wise_unary_op(out, tensor, &Complex.negate/1)
@impl true
def round(out, tensor), do: element_wise_unary_op(out, tensor, &:erlang.round/1)
@impl true
def sign(out, tensor), do: element_wise_unary_op(out, tensor, &element_sign/1)
defp element_sign(n) when n < 0, do: -1
defp element_sign(n) when n > 0, do: 1
defp element_sign(n), do: n
# https://en.wikipedia.org/wiki/Hamming_weight
# There are algorithms with a faster worst case, but they are size specific.
# The implementation below is also the most efficient for low counts. Given
# our integers are always 64 bits internally, most words contain many zero
# bits, so this should be the fastest.
defp element_popcount(0, count), do: count
defp element_popcount(n, count), do: element_popcount(n &&& n - 1, count + 1)
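# Worked example: element_popcount(0b1101, 0) clears the lowest set bit on
# each step (0b1101 -> 0b1100 -> 0b1000 -> 0) and returns 3, the number of
# set bits in 13.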
defp element_wise_unary_op(out, tensor, fun) do
data = binary_to_binary(to_binary(tensor), tensor.type, out.type, fun)
from_binary(out, data)
end
defp element_clz(0, size), do: size
defp element_clz(n, 64), do: element_clz64(n)
defp element_clz(n, 32), do: element_clz32(n)
defp element_clz(n, 16), do: element_clz16(n)
defp element_clz(n, 8), do: element_clz8(n)
defp element_clz64(num) do
case num &&& 0xFFFFFFFF00000000 do
0 -> 32 + element_clz32(num)
_ -> element_clz32(num >>> 32)
end
end
defp element_clz32(num) do
case num &&& 0xFFFF0000 do
0 -> 16 + element_clz16(num)
_ -> element_clz16(num >>> 16)
end
end
defp element_clz16(num) do
case num &&& 0xFF00 do
0 -> 8 + element_clz8(num)
_ -> element_clz8(num >>> 8)
end
end
defp element_clz8(num) do
case num &&& 0xF0 do
0 -> 4 + element_clz4(num)
_ -> element_clz4(num >>> 4)
end
end
defp element_clz4(num) do
case num &&& 0xC do
0 -> 2 + element_clz2(num)
_ -> element_clz2(num >>> 2)
end
end
defp element_clz2(0), do: 2
defp element_clz2(1), do: 1
defp element_clz2(_), do: 0
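# Worked example: element_clz(0x10, 8) sees a non-zero high nibble in
# element_clz8/1 and recurses on num >>> 4 = 0b0001, where element_clz4/1
# adds 2 and element_clz2(1) adds 1, giving the 3 leading zeros of
# 0b0001_0000.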
## Inspect
@impl true
def inspect(tensor, inspect_opts) do
limit = inspect_opts.limit
binary = Nx.to_binary(tensor, if(limit == :infinity, do: [], else: [limit: limit + 1]))
Nx.Backend.inspect(tensor, binary, inspect_opts)
end
## Conv
@impl true
def conv(out, t, k, opts) do
padding = opts[:padding]
strides = opts[:strides]
input_dilation = opts[:input_dilation]
kernel_dilation = opts[:kernel_dilation]
feature_groups = opts[:feature_group_size]
batch_groups = opts[:batch_group_size]