ci: taming down CI timings (#903)
* docs: fix links

* test: run tests in parallel

* fix: patch for jacobian

* ci: remove Logging group

* test: remove redundant files

* test: rename file

* ci: remove Logging group

* ci: allow depwarn

* test: fix typo

* ci: cancel intermediate runs

* test: minor fixes

* test: more testing

* test: more test fixes

* test: adjust strategy to reduce runtime

* docs: simplify the examples
avik-pal authored Oct 18, 2024 · 1 parent 5761524 · commit 4a66eb8
Showing 31 changed files with 1,311 additions and 1,208 deletions.
6 changes: 5 additions & 1 deletion .github/workflows/Downgrade.yml
@@ -10,6 +10,11 @@ on:
       - master
     paths-ignore:
       - 'docs/**'
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.ref_name != github.event.repository.default_branch || github.ref != 'refs/tags/v*' }}
+
 jobs:
   test:
     runs-on: ubuntu-latest
@@ -23,7 +28,6 @@ jobs:
           - NNPDE1
           - NNPDE2
           - AdaptiveLoss
-          - Logging
           - Forward
           - DGM
           - NNODE
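(Context for the block above: the new `concurrency` settings implement the "ci: cancel intermediate runs" commit. Runs are grouped per workflow and ref, and `cancel-in-progress` aborts a superseded run; the expression is written so that runs on the default branch and on version tags are meant to be kept.)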
2 changes: 1 addition & 1 deletion .github/workflows/Tests.yml
@@ -34,7 +34,6 @@ jobs:
           - "NNPDE1"
           - "NNPDE2"
           - "AdaptiveLoss"
-          - "Logging"
           - "Forward"
           - "DGM"
           - "NNODE"
@@ -45,4 +44,5 @@
       group: "${{ matrix.group }}"
       julia-version: "${{ matrix.version }}"
       coverage-directories: "src,ext"
+      julia-runtest-depwarn: "yes" # TensorBoardLogger has a global depwarn
     secrets: "inherit"
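The `julia-runtest-depwarn: "yes"` input corresponds to the "ci: allow depwarn" commit: it suggests the shared workflow's default otherwise escalates deprecation warnings to errors, which TensorBoardLogger's load-time warning would trip. A rough local equivalent, as a sketch (only the package name comes from this repository; the flag value mirrors Julia's `--depwarn` options):

```julia
using Pkg

# Run the test suite with deprecation warnings printed rather than
# escalated to errors, so TensorBoardLogger's global depwarn cannot
# fail the run.
Pkg.test("NeuralPDE"; julia_args = ["--depwarn=yes"])
```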
14 changes: 8 additions & 6 deletions Project.toml
@@ -54,7 +54,7 @@ NeuralPDETensorBoardLoggerExt = "TensorBoardLogger"
 ADTypes = "1.9.0"
 Adapt = "4"
 AdvancedHMC = "0.6.1"
-Aqua = "0.8"
+Aqua = "0.8.9"
 ArrayInterface = "7.11"
 CUDA = "5.5.2"
 ChainRulesCore = "1.24"
@@ -69,7 +69,9 @@ ExplicitImports = "1.10.1"
 Flux = "0.14.22"
 ForwardDiff = "0.10.36"
 Functors = "0.4.12"
+Hwloc = "3.3.0"
 Integrals = "4.5"
+InteractiveUtils = "<0.0.1, 1"
 IntervalSets = "0.7.10"
 LineSearches = "7.3"
 LinearAlgebra = "1.10"
@@ -88,14 +90,13 @@ Optimization = "4"
 OptimizationOptimJL = "0.4"
 OptimizationOptimisers = "0.3"
 OrdinaryDiffEq = "6.87"
-Pkg = "1.10"
 Printf = "1.10"
 QuasiMonteCarlo = "0.3.2"
 Random = "1"
+ReTestItems = "1.29.0"
 RecursiveArrayTools = "3.27.0"
 Reexport = "1.2"
 RuntimeGeneratedFunctions = "0.5.12"
-SafeTestsets = "0.1"
 SciMLBase = "2.56"
 Statistics = "1.10"
 StochasticDiffEq = "6.69.1"
@@ -114,18 +115,19 @@ CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 DiffEqNoiseProcess = "77a26b50-5914-5dd7-bc55-306e6241c503"
 ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7"
 Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
+Hwloc = "0e44f5e4-bd66-52a0-8798-143a42290a1d"
+InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
 LineSearches = "d3d80556-e9d4-5f37-9878-2ab0fcc64255"
 LuxCUDA = "d0bbae9a-e099-4d5b-a835-1c6931763bda"
 LuxCore = "bb33d45b-7691-41d6-9220-0943567d0623"
 LuxLib = "82251201-b29d-42c6-8e01-566dec8acb11"
 MethodOfLines = "94925ecb-adb7-4558-8ed8-f975c56a0bf4"
 OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
 OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
-Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
-SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
+ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823"
 StochasticDiffEq = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0"
 TensorBoardLogger = "899adc3e-224a-11e9-021f-63837185c80f"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["Aqua", "CUDA", "DiffEqNoiseProcess", "ExplicitImports", "Flux", "LineSearches", "LuxCUDA", "LuxCore", "LuxLib", "MethodOfLines", "OptimizationOptimJL", "OrdinaryDiffEq", "Pkg", "SafeTestsets", "StochasticDiffEq", "TensorBoardLogger", "Test"]
+test = ["Aqua", "CUDA", "DiffEqNoiseProcess", "ExplicitImports", "Flux", "Hwloc", "InteractiveUtils", "LineSearches", "LuxCUDA", "LuxCore", "LuxLib", "MethodOfLines", "OptimizationOptimJL", "OrdinaryDiffEq", "ReTestItems", "StochasticDiffEq", "TensorBoardLogger", "Test"]
3 changes: 1 addition & 2 deletions docs/make.jl
@@ -18,5 +18,4 @@ makedocs(sitename = "NeuralPDE.jl",
         canonical = "https://docs.sciml.ai/NeuralPDE/stable/"),
     pages = pages)
 
-deploydocs(repo = "github.com/SciML/NeuralPDE.jl.git";
-    push_preview = true)
+deploydocs(repo = "github.com/SciML/NeuralPDE.jl.git"; push_preview = true)
2 changes: 1 addition & 1 deletion docs/src/index.md
@@ -17,7 +17,7 @@ networks which both approximate physical laws and real data simultaneously.
 - Specialized forms for solving `ODEProblem`s with neural networks.
 - Compatibility with [Flux.jl](https://fluxml.ai/) and [Lux.jl](https://lux.csail.mit.edu/)
   for all the GPU-powered machine learning layers available from those libraries.
-- Compatibility with [NeuralOperators.jl](https://docs.sciml.ai/NeuralOperators/stable/) for
+- Compatibility with [NeuralOperators.jl](https://github.com/SciML/NeuralOperators.jl) for
   mixing DeepONets and other neural operators (Fourier Neural Operators, Graph Neural Operators,
   etc.) with physics-informed loss functions.
52 changes: 28 additions & 24 deletions docs/src/tutorials/derivative_neural_network.md
@@ -52,9 +52,8 @@ We approximate the derivative of the neural network with another neural network
 using the second numeric derivative `Dt(Dtu1(t,x))`.
 
 ```@example derivativenn
-using NeuralPDE, Lux, ModelingToolkit
-using Optimization, OptimizationOptimisers, OptimizationOptimJL, LineSearches
-using Plots
+using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimisers,
+      OptimizationOptimJL, LineSearches, Plots
 using ModelingToolkit: Interval, infimum, supremum
 
 @parameters t, x
@@ -63,35 +62,40 @@ Dx = Differential(x)
 @variables u1(..), u2(..), u3(..)
 @variables Dxu1(..) Dtu1(..) Dxu2(..) Dtu2(..)
 
-eqs_ = [Dt(Dtu1(t, x)) ~ Dx(Dxu1(t, x)) + u3(t, x) * sin(pi * x),
-    Dt(Dtu2(t, x)) ~ Dx(Dxu2(t, x)) + u3(t, x) * cos(pi * x),
-    exp(-t) ~ u1(t, x) * sin(pi * x) + u2(t, x) * cos(pi * x)]
-
-bcs_ = [u1(0.0, x) ~ sin(pi * x),
-    u2(0.0, x) ~ cos(pi * x),
-    Dt(u1(0, x)) ~ -sin(pi * x),
-    Dt(u2(0, x)) ~ -cos(pi * x),
+eqs_ = [
+    Dt(Dtu1(t, x)) ~ Dx(Dxu1(t, x)) + u3(t, x) * sinpi(x),
+    Dt(Dtu2(t, x)) ~ Dx(Dxu2(t, x)) + u3(t, x) * cospi(x),
+    exp(-t) ~ u1(t, x) * sinpi(x) + u2(t, x) * cospi(x)
+]
+
+bcs_ = [
+    u1(0.0, x) ~ sinpi(x),
+    u2(0.0, x) ~ cospi(x),
+    Dt(u1(0, x)) ~ -sinpi(x),
+    Dt(u2(0, x)) ~ -cospi(x),
     u1(t, 0.0) ~ 0.0,
     u2(t, 0.0) ~ exp(-t),
     u1(t, 1.0) ~ 0.0,
-    u2(t, 1.0) ~ -exp(-t)]
+    u2(t, 1.0) ~ -exp(-t)
+]
 
-der_ = [Dt(u1(t, x)) ~ Dtu1(t, x),
+der_ = [
+    Dt(u1(t, x)) ~ Dtu1(t, x),
     Dt(u2(t, x)) ~ Dtu2(t, x),
     Dx(u1(t, x)) ~ Dxu1(t, x),
-    Dx(u2(t, x)) ~ Dxu2(t, x)]
+    Dx(u2(t, x)) ~ Dxu2(t, x)
+]
 
 bcs__ = [bcs_; der_]
 
 # Space and time domains
-domains = [t ∈ Interval(0.0, 1.0),
-    x ∈ Interval(0.0, 1.0)]
+domains = [t ∈ Interval(0.0, 1.0), x ∈ Interval(0.0, 1.0)]
 
 input_ = length(domains)
 n = 15
-chain = [Lux.Chain(Dense(input_, n, Lux.σ), Dense(n, n, Lux.σ), Dense(n, 1)) for _ in 1:7]
+chain = [Chain(Dense(input_, n, σ), Dense(n, n, σ), Dense(n, 1)) for _ in 1:7]
 
-training_strategy = QuadratureTraining(; batch = 200, reltol = 1e-6, abstol = 1e-6)
+training_strategy = StochasticTraining(128)
 discretization = PhysicsInformedNN(chain, training_strategy)
 
 vars = [u1(t, x), u2(t, x), u3(t, x), Dxu1(t, x), Dtu1(t, x), Dxu2(t, x), Dtu2(t, x)]
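The training-strategy swap visible above is the main runtime lever in this tutorial: `QuadratureTraining` adaptively integrates the PDE residual until the requested tolerances are met, while `StochasticTraining` samples a fixed number of collocation points on every loss evaluation. Both constructors are exported by NeuralPDE; the arguments below are taken directly from the diff:

```julia
using NeuralPDE

# Adaptive quadrature of the residual: accurate, but a single loss
# evaluation may need many integrand calls to reach the tolerances.
slow_strategy = QuadratureTraining(; batch = 200, reltol = 1e-6, abstol = 1e-6)

# 128 points resampled at each loss evaluation: noisier gradients, but a
# far cheaper and more predictable cost per optimizer step.
fast_strategy = StochasticTraining(128)
```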
@@ -126,13 +130,13 @@ using Plots
 
 ts, xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
 minimizers_ = [res.u.depvar[sym_prob.depvars[i]] for i in 1:length(chain)]
 
-u1_real(t, x) = exp(-t) * sin(pi * x)
-u2_real(t, x) = exp(-t) * cos(pi * x)
+u1_real(t, x) = exp(-t) * sinpi(x)
+u2_real(t, x) = exp(-t) * cospi(x)
 u3_real(t, x) = (1 + pi^2) * exp(-t)
-Dxu1_real(t, x) = pi * exp(-t) * cos(pi * x)
-Dtu1_real(t, x) = -exp(-t) * sin(pi * x)
-Dxu2_real(t, x) = -pi * exp(-t) * sin(pi * x)
-Dtu2_real(t, x) = -exp(-t) * cos(pi * x)
+Dxu1_real(t, x) = pi * exp(-t) * cospi(x)
+Dtu1_real(t, x) = -exp(-t) * sinpi(x)
+Dxu2_real(t, x) = -pi * exp(-t) * sinpi(x)
+Dtu2_real(t, x) = -exp(-t) * cospi(x)
 
 function analytic_sol_func_all(t, x)
     [u1_real(t, x), u2_real(t, x), u3_real(t, x),
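One detail of the example simplification is worth calling out: every `sin(pi * x)` became `sinpi(x)`. Base Julia's `sinpi` and `cospi` evaluate sin(πx) and cos(πx) without first forming the rounded product `pi * x`, which is easy to verify at arguments where the result should be exactly zero:

```julia
# sin(pi * x) evaluates sin at the Float64 closest to pi*x, so it misses
# exact zeros; sinpi(x) avoids that intermediate rounding.
@show sin(pi * 1.0)   # roughly 1.2e-16 rather than zero
@show sinpi(1.0)      # exactly zero
@show cos(pi * 0.5)   # roughly 6.1e-17 rather than zero
@show cospi(0.5)      # exactly zero
```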
(Diffs for the remaining 25 of the 31 changed files are not shown here.)
