Commit 32ee3db

V0.13.0 examples (#147)
* example updates
* rm jupyter content
* modified tests, added GaussianAdjoint
* rm available sensitivity test
* ...
* fixing FMIImport 1.0.6
* fixed juliacon23 example
* longer training for tests
* SciML code formatting
* minor modification to test net #2
1 parent 322f028 commit 32ee3db

38 files changed: +4064 −2371 lines

.github/workflows/Formatter.yml (+14)

@@ -0,0 +1,14 @@
+name: Format suggestions
+on:
+  pull_request:
+    # this argument is not required if you don't use the `suggestion-label` input
+    types: [ opened, reopened, synchronize, labeled, unlabeled ]
+jobs:
+  code-style:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: julia-actions/julia-format@v3
+        with:
+          version: '1' # Set `version` to '1.0.54' if you need to use JuliaFormatter.jl v1.0.54 (default: '1')
+          suggestion-label: 'format-suggest' # leave this unset or empty to show suggestions for all PRs
+
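This new workflow posts SciML-style format suggestions on pull requests via julia-actions/julia-format@v3. To apply the same style locally before pushing, a minimal sketch (assuming JuliaFormatter.jl is installed in the active environment; not part of this commit):

    # Format the whole repository in SciML style, matching the CI check.
    using JuliaFormatter
    format(".", SciMLStyle())  # rewrites files in place; returns true if everything was already formatted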

Project.toml (+33 −33; GitHub reports a full rewrite, but only the FMIImport compat bound changes)

@@ -1,33 +1,33 @@
 name = "FMIFlux"
 uuid = "fabad875-0d53-4e47-9446-963b74cae21f"
 version = "0.13.0"

 [deps]
 Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
 DifferentiableEigen = "73a20539-4e65-4dcb-a56d-dc20f210a01b"
 FMIImport = "9fcbc62e-52a0-44e9-a616-1359a0008194"
 FMISensitivity = "3e748fe5-cd7f-4615-8419-3159287187d2"
 Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
 Optim = "429524aa-4258-5aef-a3af-852621145aeb"
 OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 ThreadPools = "b189fb0b-2eb5-4ed4-bc0c-d34c51242431"

 [weakdeps]
 JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"

 [extensions]
 JLD2Ext = ["JLD2"]

 [compat]
 Colors = "0.12"
 DifferentiableEigen = "0.2.0"
-FMIImport = "1.0.0"
+FMIImport = "1.0.6"
 FMISensitivity = "0.2.0"
 Flux = "0.9 - 0.14"
 Optim = "1.6"
 OrdinaryDiffEq = "6.0"
 Statistics = "1"
 ThreadPools = "2.1"
 julia = "1.6"
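The [weakdeps]/[extensions] pair makes JLD2 support optional: JLD2Ext is compiled and loaded only when JLD2 is imported alongside FMIFlux (standard package-extension behavior on Julia ≥ 1.9). A minimal sketch of the activation, not code from this commit:

    using FMIFlux   # core package loads; JLD2Ext stays dormant
    using JLD2      # loading the weak dependency triggers JLD2Ext,
                    # enabling the JLD2-backed parameter save/load helpers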

README.md (+1 −1)

@@ -17,7 +17,7 @@ You can evaluate FMUs inside of your loss function.
 [![Run PkgEval](https://github.com/ThummeTo/FMIFlux.jl/actions/workflows/Eval.yml/badge.svg)](https://github.com/ThummeTo/FMIFlux.jl/actions/workflows/Eval.yml)
 [![Coverage](https://codecov.io/gh/ThummeTo/FMIFlux.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/ThummeTo/FMIFlux.jl)
 [![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
-[![FMIFlux Downloads](https://shields.io/endpoint?url=https://pkgs.genieframework.com/api/v1/badge/FMIFlux)](https://pkgs.genieframework.com?packages=FMIFlux)
+[![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle)

 ## How can I use FMIFlux.jl?

docs/make.jl (+34 −28)

@@ -3,36 +3,38 @@
 # Licensed under the MIT license. See LICENSE file in the project root for details.
 #

-import Pkg; Pkg.develop(path=joinpath(@__DIR__,"../../FMIFlux.jl"))
+import Pkg;
+Pkg.develop(path = joinpath(@__DIR__, "../../FMIFlux.jl"));
 using Documenter, FMIFlux
 using Documenter: GitHubActions

-makedocs(sitename="FMIFlux.jl",
-    format = Documenter.HTML(
-        collapselevel = 1,
-        sidebar_sitename = false,
-        edit_link = nothing,
-        size_threshold_ignore = [joinpath("examples","juliacon_2023.md")]
-    ),
-    warnonly=true,
-    pages= Any[
-        "Introduction" => "index.md"
-        "Examples" => [
-            "Overview" => "examples/overview.md"
-            "Simple CS-NeuralFMU" => "examples/simple_hybrid_CS.md"
-            "Simple ME-NeuralFMU" => "examples/simple_hybrid_ME.md"
-            "Growing Horizon ME-NeuralFMU" => "examples/growing_horizon_ME.md"
-            "JuliaCon 2023" => "examples/juliacon_2023.md"
-            "MDPI 2022" => "examples/mdpi_2022.md"
-            "Modelica Conference 2021" => "examples/modelica_conference_2021.md"
-            "Pluto Workshops" => "examples/workshops.md"
-        ]
-        "FAQ" => "faq.md"
-        "Library Functions" => "library.md"
-        "Related Publication" => "related.md"
-        "Contents" => "contents.md"
-    ]
-)
+makedocs(
+    sitename = "FMIFlux.jl",
+    format = Documenter.HTML(
+        collapselevel = 1,
+        sidebar_sitename = false,
+        edit_link = nothing,
+        size_threshold_ignore = [joinpath("examples", "juliacon_2023.md")],
+    ),
+    warnonly = true,
+    pages = Any[
+        "Introduction" => "index.md"
+        "Examples" => [
+            "Overview" => "examples/overview.md"
+            "Simple CS-NeuralFMU" => "examples/simple_hybrid_CS.md"
+            "Simple ME-NeuralFMU" => "examples/simple_hybrid_ME.md"
+            "Growing Horizon ME-NeuralFMU" => "examples/growing_horizon_ME.md"
+            "JuliaCon 2023" => "examples/juliacon_2023.md"
+            "MDPI 2022" => "examples/mdpi_2022.md"
+            "Modelica Conference 2021" => "examples/modelica_conference_2021.md"
+            "Pluto Workshops" => "examples/workshops.md"
+        ]
+        "FAQ" => "faq.md"
+        "Library Functions" => "library.md"
+        "Related Publication" => "related.md"
+        "Contents" => "contents.md"
+    ],
+)

 function deployConfig()
     github_repository = get(ENV, "GITHUB_REPOSITORY", "")
@@ -44,4 +46,8 @@ function deployConfig()
     return GitHubActions(github_repository, github_event_name, github_ref)
 end

-deploydocs(repo = "github.com/ThummeTo/FMIFlux.jl.git", devbranch = "main", deploy_config = deployConfig())
+deploydocs(
+    repo = "github.com/ThummeTo/FMIFlux.jl.git",
+    devbranch = "main",
+    deploy_config = deployConfig(),
+)
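docs/make.jl develops the package from a relative path and then builds with Documenter. One plausible way to build the docs locally from the repository root, a sketch assuming the docs environment provides Documenter (not part of the diff):

    import Pkg
    Pkg.activate("docs")                  # use the documentation environment
    Pkg.instantiate()                     # install Documenter and friends
    include(joinpath("docs", "make.jl"))  # runs makedocs; deploydocs skips deployment outside CI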

examples/jupyter-src/.gitignore (+2 −1)

@@ -1,2 +1,3 @@
 params/
-*.png
+*.png
+*.gif

examples/jupyter-src/juliacon_2023.ipynb (+19 −19)

@@ -25,7 +25,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 1,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -65,7 +65,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 2,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -74,7 +74,7 @@
  "using FMIFlux # for NeuralFMUs\n",
  "using FMIZoo # a collection of demo models, including the VLDM\n",
  "using FMIFlux.Flux # Machine Learning in Julia\n",
- "using DifferentialEquations # for picking a NeuralFMU solver\n",
+ "using DifferentialEquations: Tsit5 # for picking a NeuralFMU solver\n",
  "\n",
  "import JLD2 # data format for saving/loading parameters\n",
  "\n",
@@ -91,7 +91,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 3,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -108,7 +108,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 4,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -372,7 +372,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 15,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -408,7 +408,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 17,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -489,7 +489,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 19,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -552,7 +552,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 22,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -737,7 +737,7 @@
  "    params = FMIFlux.params(neuralFMU)\n",
  "\n",
  "    # initialize the scheduler, keywords are passed to the NeuralFMU\n",
- "    initialize!(scheduler; parameters=data.params, p=params[1], showProgress=showProgress)\n",
+ "    FMIFlux.initialize!(scheduler; parameters=data.params, p=params[1], showProgress=showProgress)\n",
  "    \n",
  "    # initialize Adam optimizer with our hyperparameters\n",
  "    optim = Adam(ETA, (BETA1, BETA2))\n",
@@ -747,16 +747,16 @@
  "        neuralFMU, # the neural FMU including the parameters to train\n",
  "        Iterators.repeated((), steps), # an iterator repeating `steps` times\n",
  "        optim; # the optimizer to train\n",
- "        gradient=:ForwardDiff, # currently, only ForwardDiff leads to good results for multi-event systems\n",
- "        chunk_size=32, # ForwardDiff chunk_size (=number of parameter estimations per run)\n",
- "        cb=() -> update!(scheduler), # update the scheduler after every step \n",
+ "        gradient=:ReverseDiff, # ForwardDiff leads to good results for multi-event systems\n",
+ "        chunk_size=32, # ForwardDiff chunk_size (=number of parameter estimations per run) - only if ForwardDiff is used\n",
+ "        cb=() -> FMIFlux.update!(scheduler), # update the scheduler after every step \n",
  "        proceed_on_assert=true) # proceed, even if assertions are thrown, with the next step\n",
  "    \n",
  "    # the default execution mode\n",
  "    singleInstanceMode(fmu, false)\n",
  "\n",
  "    # save our result parameters\n",
- "    fmiSaveParameters(neuralFMU, joinpath(@__DIR__, \"params\", \"$(ind).jld2\"))\n",
+ "    FMIFlux.saveParameters(neuralFMU, joinpath(@__DIR__, \"params\", \"$(ind).jld2\"))\n",
  "    \n",
  "    # simulate the NeuralFMU on a validation trajectory\n",
  "    resultNFMU = neuralFMU(x0, (data_validation.consumption_t[1], data_validation.consumption_t[end]); parameters=data_validation.params, showProgress=showProgress, maxiters=1e7, saveat=data_validation.consumption_t)\n",
@@ -817,11 +817,11 @@
  "neuralFMU = build_NFMU(fmu)\n",
  "\n",
  "# load parameters from hyperparameter optimization\n",
- "loadParameters(neuralFMU, joinpath(@__DIR__, \"juliacon_2023.jld2\"))\n",
+ "FMIFlux.loadParameters(neuralFMU, joinpath(@__DIR__, \"juliacon_2023.jld2\"))\n",
  "\n",
  "# simulate and plot the NeuralFMU\n",
- "resultNFMU = neuralFMU(x0, (tStart, tStop); parameters=data.params, showProgress=showProgress, saveat=tSave) \n",
- "resultFMU = fmiSimulate(fmu, (tStart, tStop); parameters=data.params, showProgress=showProgress, saveat=tSave) \n",
+ "resultNFMU = neuralFMU(x0, (tStart, tStop); parameters=data.params, showProgress=showProgress, saveat=tSave) \n",
+ "resultFMU = simulate(fmu, (tStart, tStop); parameters=data.params, showProgress=showProgress, saveat=tSave) \n",
  "\n",
  "# plot the NeuralFMU, original FMU and data (cumulative consumption)\n",
  "fig = plot(resultNFMU; stateIndices=6:6, stateEvents=false, timeEvents=false, label=\"NeuralFMU\", ylabel=\"cumulative consumption [m/s]\")\n",
@@ -929,7 +929,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 33,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -957,7 +957,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 34,
  "metadata": {},
  "outputs": [],
  "source": [

examples/jupyter-src/juliacon_2023_distributedhyperopt.jl (+38 −24)

@@ -13,34 +13,48 @@ using DistributedHyperOpt # add via `add "https://github.com/ThummeTo/Distribu
 # ENV["JULIA_DEBUG"] = "DistributedHyperOpt"

 nprocs()
-workers = addprocs(5)
+workers = addprocs(5)
 @everywhere include(joinpath(@__DIR__, "workshop_module.jl"))

 # creating paths for log files (logs), parameter sets (params) and hyperparameter plots (plots)
-for dir ∈ ("logs", "params", "plots")
+for dir ∈ ("logs", "params", "plots")
     path = joinpath(@__DIR__, dir)
     @info "Creating (if not already) path: $(path)"
     mkpath(path)
-end
-
-beta1 = 1.0 .- exp10.(LinRange(-4,-1,4))
-beta2 = 1.0 .- exp10.(LinRange(-6,-1,6))
-
-sampler = DistributedHyperOpt.Hyperband(;R=81, η=3, ressourceScale=1.0/81.0*NODE_Training.data.cumconsumption_t[end])
-optimization = DistributedHyperOpt.Optimization(NODE_Training.train!,
-    DistributedHyperOpt.Parameter("eta", (1e-5, 1e-2); type=:Log, samples=7, round_digits=5),
-    DistributedHyperOpt.Parameter("beta1", beta1),
-    DistributedHyperOpt.Parameter("beta2", beta2),
-    DistributedHyperOpt.Parameter("batchDur", (0.5, 20.0); samples=40, round_digits=1),
-    DistributedHyperOpt.Parameter("lastWeight", (0.1, 1.0); samples=10, round_digits=1),
-    DistributedHyperOpt.Parameter("schedulerID", [:Random, :Sequential, :LossAccumulation]),
-    DistributedHyperOpt.Parameter("loss", [:MSE, :MAE]) )
-DistributedHyperOpt.optimize(optimization;
-    sampler=sampler,
-    plot=true,
-    plot_ressources=true,
-    save_plot=joinpath(@__DIR__, "plots", "hyperoptim.png"),
-    redirect_worker_io_dir=joinpath(@__DIR__, "logs"))
-
-Plots.plot(optimization; size=(1024, 1024), ressources=true)
+end
+
+beta1 = 1.0 .- exp10.(LinRange(-4, -1, 4))
+beta2 = 1.0 .- exp10.(LinRange(-6, -1, 6))
+
+sampler = DistributedHyperOpt.Hyperband(;
+    R = 81,
+    η = 3,
+    ressourceScale = 1.0 / 81.0 * NODE_Training.data.cumconsumption_t[end],
+)
+optimization = DistributedHyperOpt.Optimization(
+    NODE_Training.train!,
+    DistributedHyperOpt.Parameter(
+        "eta",
+        (1e-5, 1e-2);
+        type = :Log,
+        samples = 7,
+        round_digits = 5,
+    ),
+    DistributedHyperOpt.Parameter("beta1", beta1),
+    DistributedHyperOpt.Parameter("beta2", beta2),
+    DistributedHyperOpt.Parameter("batchDur", (0.5, 20.0); samples = 40, round_digits = 1),
+    DistributedHyperOpt.Parameter("lastWeight", (0.1, 1.0); samples = 10, round_digits = 1),
+    DistributedHyperOpt.Parameter("schedulerID", [:Random, :Sequential, :LossAccumulation]),
+    DistributedHyperOpt.Parameter("loss", [:MSE, :MAE]),
+)
+DistributedHyperOpt.optimize(
+    optimization;
+    sampler = sampler,
+    plot = true,
+    plot_ressources = true,
+    save_plot = joinpath(@__DIR__, "plots", "hyperoptim.png"),
+    redirect_worker_io_dir = joinpath(@__DIR__, "logs"),
+)
+
+Plots.plot(optimization; size = (1024, 1024), ressources = true)

 minimum, minimizer, ressource = DistributedHyperOpt.results(optimization)
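The Hyperband sampler is configured with R = 81 and η = 3; ressourceScale appears to map one full resource budget of R = 81 onto the duration of the training trajectory (cumconsumption_t[end]). A worked sketch of the bracket schedule these values imply (standard Hyperband arithmetic per Li et al., 2018, not DistributedHyperOpt internals):

    # Standard Hyperband schedule for R = 81, η = 3.
    R, η = 81, 3
    s_max = floor(Int, log(η, R))                   # = 4, so brackets s = 4, 3, 2, 1, 0
    for s = s_max:-1:0
        n = ceil(Int, (s_max + 1) / (s + 1) * η^s)  # configurations started in bracket s
        r = R / η^s                                 # initial resource per configuration
        println("bracket s=$s: n=$n configs at r=$r resource units each")
    end

For example, the most exploratory bracket (s = 4) starts 81 configurations with 1 resource unit each, while the most conservative (s = 0) runs 5 configurations at the full budget of 81.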
