Skip to content
This repository was archived by the owner on Sep 28, 2024. It is now read-only.
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .github/workflows/CI.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
name: CI
env:
DATADEPS_ALWAYS_ACCEPT: true
on:
- push
- pull_request
Expand Down
11 changes: 10 additions & 1 deletion src/NeuralOperators.jl
Original file line number Diff line number Diff line change
@@ -1,8 +1,17 @@
module NeuralOperators

using DataDeps
using Fetch
using MAT
using Flux
using FFTW
using Tullio

# Register remote datasets with DataDeps at package load time so that
# `datadep"..."` lookups in the loaders resolve lazily on first use.
function __init__()
    register_datasets()
end

# NOTE: src/preprocess.jl was renamed to src/data.jl in this change;
# the stale include("preprocess.jl") is dropped so the module loads.
include("data.jl")     # dataset registration and loaders
include("fourier.jl")  # SpectralConv1d / FourierOperator layers
include("models.jl")   # FNO model assembled from the Fourier layers

end
8 changes: 2 additions & 6 deletions src/preprocess.jl → src/data.jl
Original file line number Diff line number Diff line change
@@ -1,9 +1,5 @@
using DataDeps
using Fetch
using MAT

export
get_data
get_burgers_data

function register_datasets()
register(DataDep(
Expand All @@ -19,7 +15,7 @@ function register_datasets()
))
end

function get_data(; n=1000, Δsamples=2^3, grid_size=div(2^13, Δsamples))
function get_burgers_data(; n=1000, Δsamples=2^3, grid_size=div(2^13, Δsamples))
file = matopen(joinpath(datadep"BurgersR10", "burgers_data_R10.mat"))
x_data = collect(read(file, "a")[1:n, 1:Δsamples:end]')
y_data = collect(read(file, "u")[1:n, 1:Δsamples:end]')
Expand Down
24 changes: 1 addition & 23 deletions src/fourier.jl
Original file line number Diff line number Diff line change
@@ -1,11 +1,6 @@
using Flux
using FFTW
using Tullio

export
SpectralConv1d,
FourierOperator,
FNO
FourierOperator

c_glorot_uniform(dims...) = Flux.glorot_uniform(dims...) + Flux.glorot_uniform(dims...) * im

Expand Down Expand Up @@ -62,20 +57,3 @@ function FourierOperator(
x -> σ.(x)
)
end

"""
    FNO()

Build a Fourier Neural Operator model: a lifting `Dense` layer, four
`FourierOperator` blocks (the last one without activation), and a two-layer
projection head, flattened to match the target shape.

Uses complex Glorot initialization (`c_glorot_uniform`) because the spectral
layers operate on complex Fourier coefficients.
"""
function FNO()
    modes = 16
    ch = 64 => 64
    # Softplus activation. `log1p(exp(x))` is numerically more accurate than
    # `log(1 + exp(x))` when `exp(x)` is small (x very negative).
    σ = x -> @. log1p(exp(x))

    return Chain(
        # Lift the 2-channel input into the 64-channel operator width.
        Dense(2, 64, init=c_glorot_uniform),
        FourierOperator(ch, modes, σ),
        FourierOperator(ch, modes, σ),
        FourierOperator(ch, modes, σ),
        # Final spectral block has no activation before projection.
        FourierOperator(ch, modes),
        # Project channels back down to a single output value per grid point.
        Dense(64, 128, σ, init=c_glorot_uniform),
        Dense(128, 1, init=c_glorot_uniform),
        flatten
    )
end
17 changes: 17 additions & 0 deletions src/models.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
export
    FNO

"""
    FNO(; ch=64 => 64, modes=16)

Build a Fourier Neural Operator model: a lifting `Dense` layer, four
`FourierOperator` blocks with `ch` channels and `modes` retained Fourier
modes (the last block without activation), and a two-layer projection head,
flattened to match the target shape.

# Keywords
- `ch`: input => output channel pair for the spectral blocks (default `64 => 64`).
- `modes`: number of low-frequency Fourier modes kept (default `16`).
"""
function FNO(; ch = 64 => 64, modes=16)
    # Softplus activation via log1p for numerical accuracy when exp(x) is small.
    σ = x -> @. log1p(exp(x))

    return Chain(
        # Lift the 2-channel input into the operator width. Use first(ch) /
        # last(ch) (not a hard-coded 64) so a non-default `ch` actually works.
        Dense(2, first(ch), init=c_glorot_uniform),
        FourierOperator(ch, modes, σ),
        FourierOperator(ch, modes, σ),
        FourierOperator(ch, modes, σ),
        # Final spectral block has no activation before projection.
        FourierOperator(ch, modes),
        # Project channels back down to a single output value per grid point.
        Dense(last(ch), 128, σ, init=c_glorot_uniform),
        Dense(128, 1, init=c_glorot_uniform),
        flatten
    )
end
2 changes: 1 addition & 1 deletion test/preprocess.jl → test/data.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
@testset "get data" begin
xs, ys = get_data()
xs, ys = get_burgers_data()

@test size(xs) == (2, 1024, 1000)
@test size(ys) == (1024, 1000)
Expand Down
19 changes: 4 additions & 15 deletions test/fourier.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
using Flux

@testset "SpectralConv1d" begin
modes = 16
ch = 64 => 64
Expand All @@ -9,13 +7,14 @@ using Flux
SpectralConv1d(ch, modes)
)

𝐱, _ = get_data()
𝐱, _ = get_burgers_data()
@test size(m(𝐱)) == (64, 1024, 1000)

T = Float32
loss(x, y) = Flux.mse(real.(m(x)), y)
data = [(T.(𝐱[:, :, 1:5]), rand(T, 64, 1024, 5))]
Flux.train!(loss, params(m), data, Flux.ADAM())
@test true
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is redundant

end

@testset "FourierOperator" begin
Expand All @@ -27,22 +26,12 @@ end
FourierOperator(ch, modes)
)

𝐱, _ = get_data()
𝐱, _ = get_burgers_data()
@test size(m(𝐱)) == (64, 1024, 1000)

T = Float32
loss(x, y) = Flux.mse(real.(m(x)), y)
data = [(T.(𝐱[:, :, 1:5]), rand(T, 64, 1024, 5))]
Flux.train!(loss, params(m), data, Flux.ADAM())
end

@testset "FNO" begin
𝐱, 𝐲 = get_data()
𝐱, 𝐲 = Float32.(𝐱), Float32.(𝐲)
@test size(FNO()(𝐱)) == size(𝐲)

m = FNO()
loss(𝐱, 𝐲) = sum(abs2, 𝐲 .- m(𝐱)) / size(𝐱)[end]
data = [(𝐱[:, :, 1:5], 𝐲[:, 1:5])]
Flux.train!(loss, params(m), data, Flux.ADAM())
@test true
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is redundant

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Because `Flux.train!` is a nondeterministic process, `@test true` is added to make sure the training step completes without error.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The test case will still pass even if `train!` fails.

end
11 changes: 11 additions & 0 deletions test/models.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
@testset "FNO" begin
    𝐱, 𝐲 = get_burgers_data()
    𝐱, 𝐲 = Float32.(𝐱), Float32.(𝐲)
    # Forward pass: the model output must match the target shape.
    @test size(FNO()(𝐱)) == size(𝐲)

    m = FNO()
    # Mean squared error averaged over the batch (last dimension).
    loss(𝐱, 𝐲) = sum(abs2, 𝐲 .- m(𝐱)) / size(𝐱)[end]
    data = [(𝐱[:, :, 1:5], 𝐲[:, 1:5])]
    # One training step on a small slice to exercise the backward pass.
    # `train!` throws if gradients cannot be computed, so the redundant
    # `@test true` (which passed even on failure) is removed.
    Flux.train!(loss, params(m), data, Flux.ADAM())
end
6 changes: 3 additions & 3 deletions test/runtests.jl
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
using Flux
using NeuralOperators
using Test

# Auto-accept dataset downloads so the test suite runs non-interactively.
ENV["DATADEPS_ALWAYS_ACCEPT"] = true

@testset "NeuralOperators.jl" begin
    # test/preprocess.jl was renamed to test/data.jl; only the new
    # name is included (including both would error on the missing file).
    include("data.jl")     # dataset loading tests
    include("fourier.jl")  # spectral layer tests
    include("models.jl")   # full FNO model tests
end