
Commit 4028d0c

Adding a few introductory tutorials (#233)
* Add a few beginner-level tutorials
* Exclude the docs folder from CI checks
1 parent c63306d commit 4028d0c

15 files changed: +1078 −36 lines

.pre-commit-config.yaml

Lines changed: 2 additions & 1 deletion

@@ -29,7 +29,7 @@ repos:
          "--ignore-magic", "--fail-under=99", "--exclude=['setup.py', 'test', 'build', 'docs']",
          "--ignore-regex=['forward', 'backward', 'reset_parameters', 'extra_repr', 'MetaData', 'apply_activation','exec_activation']",
          "--color", "--"]
-       exclude: ^modulus/internal/|^modulus/experimental/
+       exclude: ^modulus/internal/|^modulus/experimental/|^docs/

   - repo: https://github.com/igorshubovych/markdownlint-cli
     rev: v0.35.0
@@ -49,6 +49,7 @@ repos:
     hooks:
       - id: ruff
         args: [--fix]
+        exclude: ^docs/

   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v3.4.0
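Taken together, the two new exclude patterns keep the docstring-coverage hook (the interrogate arguments shown in the first hunk) and ruff from linting the tutorial scripts under docs/, matching the commit's stated goal of keeping the docs folder out of CI.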

Makefile

Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@ doctest:
 pytest:
 	coverage run \
 		--rcfile='test/coverage.pytest.rc' \
-		-m pytest
+		-m pytest --ignore-glob=*docs*

 pytest-internal:
 	cd test/internal && \
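The --ignore-glob=*docs* flag keeps the tutorial scripts out of the main coverage-driven pytest run; they are exercised separately by the docs/test_runner.py added below.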

docs/api/modulus.models.rst

Lines changed: 6 additions & 34 deletions

@@ -248,6 +248,8 @@ complex models.
    refactored in the future.


+.. _modulus-models-from-torch:
+
 Converting PyTorch Models to Modulus Models
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -284,6 +286,10 @@ below.
     ModulusModel = modulus.Module.from_torch(TorchModel, meta=ConvMetaData())


+
+
+.. _saving-and-loading-modulus-models:
+
 Saving and Loading Modulus Models
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -500,37 +506,3 @@ DLWP Model
    :members:
    :show-inheritance:

-Spherical Harmonics Fourier Neural Operator
---------------------------------------------
-
-.. automodule:: modulus.models.sfno.sfnonet
-   :members:
-   :show-inheritance:
-
-.. automodule:: modulus.models.sfno.activations
-   :members:
-   :show-inheritance:
-
-.. automodule:: modulus.models.sfno.factorizations
-   :members:
-   :show-inheritance:
-
-.. automodule:: modulus.models.sfno.layers
-   :members:
-   :show-inheritance:
-
-.. automodule:: modulus.models.sfno.s2convolutions
-   :members:
-   :show-inheritance:
-
-.. automodule:: modulus.models.sfno.contractions
-   :members:
-   :show-inheritance:
-
-.. automodule:: modulus.models.sfno.initialization
-   :members:
-   :show-inheritance:
-
-.. automodule:: modulus.models.sfno.preprocessor
-   :members:
-   :show-inheritance:
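For readers following the new modulus-models-from-torch anchor, the context line in the second hunk shows the conversion call itself. A minimal, self-contained sketch of that pattern — the TorchModel class below is illustrative, and the optional meta argument is omitted:

import torch

import modulus


class TorchModel(torch.nn.Module):
    """A plain PyTorch model to be exposed as a Modulus module."""

    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 8)

    def forward(self, x):
        return self.linear(x)


# Generate a Modulus-compatible class from the PyTorch one and use it as usual
ModulusModel = modulus.Module.from_torch(TorchModel)
model = ModulusModel()
print(model(torch.ones(4, 8)).shape)  # torch.Size([4, 8])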

docs/index.rst

Lines changed: 7 additions & 0 deletions

@@ -1,6 +1,13 @@
 Welcome to Modulus Core's documentation!
 ========================================

+.. toctree::
+   :maxdepth: 2
+   :caption: Modulus Tutorials
+   :name: Modulus Tutorials
+
+   tutorials/simple_training_example.rst
+   tutorials/simple_logging_and_checkpointing.rst

 .. toctree::
    :maxdepth: 2

docs/test_runner.py

Lines changed: 18 additions & 0 deletions

import os
import subprocess

import pytest

# Collect all the Python files in the scripts directory
script_files = [f for f in os.listdir("test_scripts/") if f.endswith(".py")]


@pytest.mark.parametrize("script_file", script_files)
def test_script_execution(script_file):
    """Test that a script runs without error."""
    filepath = os.path.join("test_scripts/", script_file)
    result = subprocess.run(["python", filepath], capture_output=True, text=True)

    # Check that the script executed successfully
    assert result.returncode == 0, f"Script {script_file} failed with error:\n{result.stderr}"
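Run from the docs directory (for example, with pytest test_runner.py), this collects every script under test_scripts/ as its own parametrized test case and reports the failing script's stderr if any of them exits non-zero.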

docs/test_scripts/test_basic.py

Lines changed: 47 additions & 0 deletions

# [imports]
import torch

from modulus.datapipes.benchmarks.darcy import Darcy2D
from modulus.metrics.general.mse import mse
from modulus.models.fno.fno import FNO

# [imports]

# [code]
normaliser = {
    "permeability": (1.25, 0.75),
    "darcy": (4.52e-2, 2.79e-2),
}
dataloader = Darcy2D(
    resolution=256, batch_size=64, nr_permeability_freq=5, normaliser=normaliser
)
model = FNO(
    in_channels=1,
    out_channels=1,
    decoder_layers=1,
    decoder_layer_size=32,
    dimension=2,
    latent_channels=32,
    num_fno_layers=4,
    num_fno_modes=12,
    padding=5,
).to("cuda")

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=lambda step: 0.85**step
)

# run for 20 iterations
for i in range(20):
    optimizer.zero_grad()  # clear gradients left over from the previous step
    batch = next(iter(dataloader))
    true = batch["darcy"]
    pred = model(batch["permeability"])
    loss = mse(pred, true)
    loss.backward()
    optimizer.step()
    scheduler.step()

    print(f"Iteration: {i}. Loss: {loss.detach().cpu().numpy()}")
# [code]
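The paired # [imports] and # [code] comments appear to be start/end markers so the tutorial pages can literalinclude just those regions of the script; the remaining scripts below follow the same convention.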

Lines changed: 69 additions & 0 deletions

# [imports]
import torch

from modulus.datapipes.benchmarks.darcy import Darcy2D
from modulus.launch.utils import load_checkpoint, save_checkpoint
from modulus.metrics.general.mse import mse
from modulus.models.fno.fno import FNO

# [imports]

# [code]
normaliser = {
    "permeability": (1.25, 0.75),
    "darcy": (4.52e-2, 2.79e-2),
}
dataloader = Darcy2D(
    resolution=256, batch_size=64, nr_permeability_freq=5, normaliser=normaliser
)
model = FNO(
    in_channels=1,
    out_channels=1,
    decoder_layers=1,
    decoder_layer_size=32,
    dimension=2,
    latent_channels=32,
    num_fno_layers=4,
    num_fno_modes=12,
    padding=5,
).to("cuda")

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=lambda step: 0.85**step
)

# load the epoch, and the model, optimizer, and scheduler parameters from the
# checkpoint if one exists
loaded_epoch = load_checkpoint(
    "./checkpoints",
    models=model,
    optimizer=optimizer,
    scheduler=scheduler,
    device="cuda",
)

# set up training to run for 20 epochs, each epoch running for 5 iterations,
# starting from the loaded epoch
for i in range(max(1, loaded_epoch), 20):
    # this would be iterations through different batches
    for _ in range(5):
        optimizer.zero_grad()  # clear gradients left over from the previous step
        batch = next(iter(dataloader))
        true = batch["darcy"]
        pred = model(batch["permeability"])
        loss = mse(pred, true)
        loss.backward()
        optimizer.step()
        scheduler.step()

    # save a checkpoint every 5th epoch
    if i % 5 == 0:
        save_checkpoint(
            "./checkpoints",
            models=model,
            optimizer=optimizer,
            scheduler=scheduler,
            epoch=i,
        )
# [code]
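On a fresh run there is no checkpoint directory yet, and load_checkpoint is then expected to return epoch 0, so max(1, loaded_epoch) starts training at epoch 1; on a restart the loop resumes from the saved epoch instead.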

Lines changed: 41 additions & 0 deletions

# [imports]
import torch

import modulus
from modulus.models.fno.fno import FNO

# [imports]

# [code]

model = FNO(
    in_channels=1,
    out_channels=1,
    decoder_layers=1,
    decoder_layer_size=32,
    dimension=2,
    latent_channels=32,
    num_fno_layers=4,
    num_fno_modes=12,
    padding=5,
).to("cuda")

# Save the checkpoint. For this demo we save an untrained checkpoint, but in a
# typical workflow the checkpoint is saved after model training.
model.save("untrained_checkpoint.mdlus")

# Inference code

# The parameters needed to instantiate the model are loaded from the checkpoint
model_inf = modulus.Module.from_checkpoint("untrained_checkpoint.mdlus").to("cuda")

# put the model in evaluation mode
model_inf.eval()

# run inference
with torch.inference_mode():
    input = torch.ones(8, 1, 256, 256).to("cuda")
    output = model_inf(input)
    print(output.shape)

# [code]
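Note the two checkpoint flavors across these scripts: model.save and modulus.Module.from_checkpoint handle a single .mdlus file holding the weights plus the arguments needed to re-instantiate the model, while save_checkpoint and load_checkpoint from modulus.launch.utils (previous script) additionally track the optimizer, scheduler, and epoch so training can resume.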

Lines changed: 63 additions & 0 deletions

# [imports]
import torch

from modulus.datapipes.benchmarks.darcy import Darcy2D
from modulus.launch.logging import LaunchLogger, PythonLogger
from modulus.metrics.general.mse import mse
from modulus.models.fno.fno import FNO

# [imports]

# [code]
normaliser = {
    "permeability": (1.25, 0.75),
    "darcy": (4.52e-2, 2.79e-2),
}
dataloader = Darcy2D(
    resolution=256, batch_size=64, nr_permeability_freq=5, normaliser=normaliser
)
model = FNO(
    in_channels=1,
    out_channels=1,
    decoder_layers=1,
    decoder_layer_size=32,
    dimension=2,
    latent_channels=32,
    num_fno_layers=4,
    num_fno_modes=12,
    padding=5,
).to("cuda")

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=lambda step: 0.85**step
)

# Initialize the loggers
logger = PythonLogger("main")  # general Python logger
LaunchLogger.initialize()

# Use logger methods to track various information during training
logger.info("Starting Training!")

# set up training to run for 20 epochs, each epoch running for 5 iterations
for i in range(20):
    # wrap each epoch in LaunchLogger to control the output frequency of
    # console logs
    with LaunchLogger("train", epoch=i) as launchlog:
        # this would be iterations through different batches
        for _ in range(5):
            optimizer.zero_grad()  # clear gradients left over from the previous step
            batch = next(iter(dataloader))
            true = batch["darcy"]
            pred = model(batch["permeability"])
            loss = mse(pred, true)
            loss.backward()
            optimizer.step()
            scheduler.step()
            launchlog.log_minibatch({"Loss": loss.detach().cpu().numpy()})

        launchlog.log_epoch({"Learning Rate": optimizer.param_groups[0]["lr"]})


logger.info("Finished Training!")
# [code]
