diff --git a/.github/workflows/autopush.yml b/.github/workflows/autopush.yml new file mode 100644 index 0000000..f89b08a --- /dev/null +++ b/.github/workflows/autopush.yml @@ -0,0 +1,21 @@ +name: Gitlab mirror +on: + push: + branches: + - main + +jobs: + autopush: + name: Automatic push to gitlab.tiker.net + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: | + mkdir ~/.ssh && echo -e "Host gitlab.tiker.net\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config + eval $(ssh-agent) && echo "$GITLAB_AUTOPUSH_KEY" | ssh-add - + git fetch --unshallow + git push "git@gitlab.tiker.net:inducer/$(basename $GITHUB_REPOSITORY).git" main + env: + GITLAB_AUTOPUSH_KEY: ${{ secrets.GITLAB_AUTOPUSH_KEY }} + +# vim: sw=4 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 313cdf7..a315f31 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,7 +2,7 @@ name: CI on: push: branches: - - master + - main pull_request: paths-ignore: - 'doc/*.rst' @@ -18,10 +18,11 @@ jobs: - uses: actions/setup-python@v1 with: - python-version: '3.x' + # matches compat target in setup.py + python-version: '3.6' - name: "Main Script" run: | - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/prepare-and-run-flake8.sh + curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/main/prepare-and-run-flake8.sh . ./prepare-and-run-flake8.sh "$(basename $GITHUB_REPOSITORY)" test examples pylint: @@ -35,7 +36,7 @@ jobs: python-version: '3.x' - name: "Main Script" run: | - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/prepare-and-run-pylint.sh + curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/main/prepare-and-run-pylint.sh . ./prepare-and-run-pylint.sh "$(basename $GITHUB_REPOSITORY)" test examples pytest: @@ -43,7 +44,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.6, 3.7, 3.8] + python-version: ["3.6", "3.8", "3.x"] steps: - uses: actions/checkout@v2 - @@ -56,15 +57,33 @@ jobs: sudo apt update sudo apt install gfortran-7 liblapack-dev libblas-dev sudo ln -sf /usr/bin/gfortran-7 /usr/bin/gfortran - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh + curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/main/build-and-test-py-project.sh . ./build-and-test-py-project.sh + pytest_conda: + name: Pytest Conda + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: "Main Script" + run: | + CONDA_ENVIRONMENT=.test-conda-env-py3.yml + curl -L -O -k https://tiker.net/ci-support-v0 + . ./ci-support-v0 + build_py_project_in_conda_env + + sudo apt update + sudo apt install gfortran-7 liblapack-dev libblas-dev + sudo ln -sf /usr/bin/gfortran-7 /usr/bin/gfortran + + test_py_project + examples: name: Examples on Py${{ matrix.python-version }} runs-on: ubuntu-latest strategy: matrix: - python-version: [3.6, 3.7, 3.8] + python-version: ["3.6", "3.8", "3.x"] steps: - uses: actions/checkout@v2 - @@ -74,12 +93,32 @@ jobs: - name: "Main Script" run: | EXTRA_INSTALL="numpy matplotlib scipy" + sudo apt update sudo apt install gfortran-7 liblapack-dev libblas-dev sudo ln -sf /usr/bin/gfortran-7 /usr/bin/gfortran - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-py-project-and-run-examples.sh + + curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/main/build-py-project-and-run-examples.sh . 
./build-py-project-and-run-examples.sh + examples_conda: + name: Examples Conda + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: "Main Script" + run: | + CONDA_ENVIRONMENT=.test-conda-env-py3.yml + curl -L -O -k https://tiker.net/ci-support-v0 + . ./ci-support-v0 + build_py_project_in_conda_env + + sudo apt update + sudo apt install gfortran-7 liblapack-dev libblas-dev + sudo ln -sf /usr/bin/gfortran-7 /usr/bin/gfortran + + run_examples + docs: name: Documentation runs-on: ubuntu-latest @@ -92,7 +131,7 @@ jobs: - name: "Main Script" run: | EXTRA_INSTALL="numpy" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/ci-support.sh + curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/main/ci-support.sh . ci-support.sh build_py_project_in_venv build_docs diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 94ad826..9d95c7a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,7 +2,7 @@ Python 3: script: - py_version=3 - EXTRA_INSTALL="numpy scipy" - - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-and-test-py-project.sh + - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/main/build-and-test-py-project.sh - ". ./build-and-test-py-project.sh" tags: - python3 @@ -17,7 +17,7 @@ Python 3 Examples: script: - py_version=3 - EXTRA_INSTALL="numpy scipy matplotlib" - - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-py-project-and-run-examples.sh + - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/main/build-py-project-and-run-examples.sh - ". ./build-py-project-and-run-examples.sh" tags: - python3 @@ -31,7 +31,7 @@ Pylint: script: | export PY_EXE=python3 EXTRA_INSTALL="numpy scipy" - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/prepare-and-run-pylint.sh + curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/main/prepare-and-run-pylint.sh . ./prepare-and-run-pylint.sh "$CI_PROJECT_NAME" test examples tags: - python3 @@ -41,14 +41,14 @@ Pylint: Documentation: script: - EXTRA_INSTALL="numpy" - - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/build-docs.sh + - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/main/build-docs.sh - ". ./build-docs.sh" tags: - python3 Flake8: script: | - curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/master/prepare-and-run-flake8.sh + curl -L -O -k https://gitlab.tiker.net/inducer/ci-support/raw/main/prepare-and-run-flake8.sh . ./prepare-and-run-flake8.sh "$CI_PROJECT_NAME" test examples tags: - python3 diff --git a/.test-conda-env-py3.yml b/.test-conda-env-py3.yml new file mode 100644 index 0000000..2331cb1 --- /dev/null +++ b/.test-conda-env-py3.yml @@ -0,0 +1,13 @@ +name: test-conda-env +channels: +- conda-forge +- nodefaults + +dependencies: +- python=3 +- git +- numpy +- scipy +- matplotlib-base + +- pip diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..f1123e1 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,11 @@ +recursive-include examples *.py + +include test/*.f90 +include test/conftest.py +include test/utils.py +include test/*_test_systems.py + +include doc/*.rst +include doc/conf.py +include doc/Makefile +include doc/make.bat diff --git a/README.rst b/README.rst index 6733b88..50f2cac 100644 --- a/README.rst +++ b/README.rst @@ -1,12 +1,12 @@ leap: Descriptive Time Integration with Flexbile Multi-Rate Algorithms ====================================================================== -.. 
image:: https://gitlab.tiker.net/inducer/leap/badges/master/pipeline.svg +.. image:: https://gitlab.tiker.net/inducer/leap/badges/main/pipeline.svg :alt: Gitlab Build Status - :target: https://gitlab.tiker.net/inducer/leap/commits/master -.. image:: https://github.com/inducer/leap/workflows/CI/badge.svg?branch=master&event=push + :target: https://gitlab.tiker.net/inducer/leap/commits/main +.. image:: https://github.com/inducer/leap/workflows/CI/badge.svg?branch=main&event=push :alt: Github Build Status - :target: https://github.com/inducer/leap/actions?query=branch%3Amaster+workflow%3ACI+event%3Apush + :target: https://github.com/inducer/leap/actions?query=branch%3Amain+workflow%3ACI+event%3Apush .. image:: https://badge.fury.io/py/leap.png :alt: Python Package Index Release Page :target: https://pypi.org/project/leap/ diff --git a/doc/conf.py b/doc/conf.py index 8921df5..1f1c01c 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,59 +1,11 @@ -# leap documentation build configuration file, created by -# sphinx-quickstart on Sun Jan 26 10:09:04 2014. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. +from urllib.request import urlopen -import sys # noqa -import os # noqa +_conf_url = \ + "https://raw.githubusercontent.com/inducer/sphinxconfig/main/sphinxconfig.py" +with urlopen(_conf_url) as _inf: + exec(compile(_inf.read(), _conf_url, "exec"), globals()) -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.doctest", - "sphinx.ext.intersphinx", - "sphinx.ext.mathjax", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix of source filenames. -source_suffix = ".rst" - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = "leap" -copyright = "2014-6, Matt Wala and Andreas Kloeckner" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. +copyright = "2014-20, Matt Wala and Andreas Kloeckner" ver_dic = {} _version_source = "../leap/version.py" @@ -66,222 +18,9 @@ release = ver_dic["VERSION_TEXT"] version = release -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. 
-#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -html_theme = "alabaster" - -html_theme_options = { - "extra_nav_links": { - "🚀 Code/Bugs": "https://gitlab.tiker.net/inducer/leap", - "💾 Download Releases": "https://pypi.python.org/pypi/leap", - } - } - -html_sidebars = { - "**": [ - "about.html", - "navigation.html", - "relations.html", - "searchbox.html", - ] -} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
-#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = "leapdoc" - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - #'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ("index", "leap.tex", "leap Documentation", - "Andreas Kloeckner", "manual"), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ("index", "leap", "leap Documentation", - ["Andreas Kloeckner"], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ("index", "leap", "leap Documentation", - "Andreas Kloeckner", "leap", "One line description of project.", - "Miscellaneous"), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { - "https://docs.python.org/": None, - "https://docs.scipy.org/doc/numpy/": None, + "https://docs.python.org/3/": None, + "https://numpy.org/doc/stable/": None, "https://documen.tician.de/pymbolic/": None, "https://documen.tician.de/dagrt/": None, } diff --git a/doc/index.rst b/doc/index.rst index 3215714..c58a951 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -5,12 +5,13 @@ TODO: Insert example here. :mod:`leap` is based on :mod:`dagrt`. - .. toctree:: :maxdepth: 2 reference misc + 🚀 Github + 💾 Download Releases * :ref:`genindex` * :ref:`modindex` diff --git a/doc/reference.rst b/doc/reference.rst index dbb7ffb..cc1318d 100644 --- a/doc/reference.rst +++ b/doc/reference.rst @@ -1,7 +1,7 @@ Reference ========= -.. 
module:: leap +.. automodule:: leap Runge-Kutta Methods ------------------- diff --git a/examples/implicit_euler/test_implicit_euler.py b/examples/implicit_euler/test_implicit_euler.py index 5c0b911..709bb7a 100755 --- a/examples/implicit_euler/test_implicit_euler.py +++ b/examples/implicit_euler/test_implicit_euler.py @@ -59,7 +59,7 @@ def test_im_euler_accuracy(python_method_impl, show_dag=False, plot_solution=False): component_id = "y" - from implicit_euler import ImplicitEulerMethodBuilder + from .implicit_euler import ImplicitEulerMethodBuilder method = ImplicitEulerMethodBuilder(component_id) code = method.generate(solver_hook) diff --git a/examples/variable-coeff-wave-equation/wave-equation.py b/examples/variable-coeff-wave-equation/wave-equation.py index 8f1d36a..150f00c 100755 --- a/examples/variable-coeff-wave-equation/wave-equation.py +++ b/examples/variable-coeff-wave-equation/wave-equation.py @@ -120,8 +120,7 @@ def __init__(self, ngridpoints, component_coeffs, decay=0, diffusion=0): component_indices = [] component_sizes = [] - for i, (start, stop) in enumerate( - zip(component_times, component_times[1:])): + for start, stop in zip(component_times, component_times[1:]): indices = (start < grid) & (grid < stop) component_indices.append(indices) component_sizes.append(2 * sum(indices)) diff --git a/leap/__init__.py b/leap/__init__.py index 9d1d86b..db10290 100644 --- a/leap/__init__.py +++ b/leap/__init__.py @@ -1,7 +1,12 @@ -"""Leap root module""" +""" +.. autoclass:: MethodBuilder +""" -__copyright__ = "Copyright (C) 2014 Andreas Kloeckner" +__copyright__ = """ +Copyright (C) 2014 Andreas Kloeckner +CopyRight (C) 2020 Cory Mikida +""" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy @@ -24,6 +29,9 @@ """ +import dagrt.language + + # {{{ command-line generator def run_script_from_commandline(): @@ -52,15 +60,19 @@ def run_script_from_commandline(): # {{{ method builder base class class MethodBuilder: + """An abstract base class for method implementations that generate + code for :mod:`dagrt`. - def generate(self, *solver_hooks): + .. automethod:: generate + .. automethod:: implicit_expression + """ + + def generate(self, *solver_hooks) -> dagrt.language.DAGCode: """ Generate a method description. :arg solver_hooks: A list of callbacks that generate expressions - for calling user-supplied implicit solvers - - :return: A `DAGCode` instance + for calling user-supplied implicit solvers """ raise NotImplementedError() @@ -70,10 +82,9 @@ def implicit_expression(self, expression_tag=None): instances will follow. :arg expression_tag: A name for the expression, if multiple - expressions are present in the generated code. - - :return: A tuple consisting of :mod:`pymbolic` expressions and - the names of the free variables in the expressions. + expressions are present in the generated code. + :returns: A tuple consisting of :mod:`pymbolic` expressions and + the names of the free variables in the expressions. """ raise NotImplementedError() @@ -82,6 +93,7 @@ def implicit_expression(self, expression_tag=None): # {{{ two-order adaptivity + class TwoOrderAdaptiveMethodBuilderMixin(MethodBuilder): """ This class expected the following members to be defined: state, t, dt. @@ -155,6 +167,90 @@ def norm(expr): # }}} +# {{{ one-order adaptivity + +class OneOrderAdaptiveMethodBuilderMixin(MethodBuilder): + """ + This class expected the following members to be defined: state, t, dt. 
+ """ + + def __init__(self, atol=0, rtol=0, max_dt_growth=None, min_dt_shrinkage=None): + self.adaptive = bool(atol or rtol) + self.atol = atol + self.rtol = rtol + + if max_dt_growth is None: + max_dt_growth = 5 + + if min_dt_shrinkage is None: + min_dt_shrinkage = 0.1 + + self.max_dt_growth = max_dt_growth + self.min_dt_shrinkage = min_dt_shrinkage + + # Error constants for Adams methods + self.c_exp = [1/2, 5/12, 3/8, 251/720] + self.c_imp = [-1/2, -1/12, -1/24, -19/720] + + def finish_nonadaptive(self, cb, high_order_estimate, low_order_estimate, + rhss, time_data): + raise NotImplementedError() + + def finish_adaptive(self, cb, high_order_estimate, low_order_estimate, + rhss, time_data): + from pymbolic import var + from pymbolic.primitives import Comparison, LogicalOr, Max, Min + from dagrt.expression import IfThenElse + + norm_start_state = var("norm_start_state") + norm_end_state = var("norm_end_state") + rel_error_raw = var("rel_error_raw") + rel_error = var("rel_error") + + def norm(expr): + return var("norm_2")(expr) + + cb(norm_start_state, norm(self.state)) + cb(norm_end_state, norm(low_order_estimate)) + cb(rel_error_raw, abs(self.c_imp[self.function_family.order-1]) + * norm(high_order_estimate - low_order_estimate) + / (abs(self.c_exp[self.function_family.order-1] + - self.c_imp[self.function_family.order-1]) + * ((self.atol + self.rtol + * Max((norm_start_state, norm_end_state)))) + )) + + cb(rel_error, IfThenElse(Comparison(rel_error_raw, "==", 0), + 1.0e-14, rel_error_raw)) + + with cb.if_(LogicalOr((Comparison(rel_error, ">", 1), + var("isnan")(rel_error)))): + + with cb.if_(var("isnan")(rel_error)): + cb(self.dt, self.min_dt_shrinkage * self.dt) + with cb.else_(): + cb(self.dt, Max((0.9 * self.dt + * rel_error ** (-1 / (self.function_family.order)), + self.min_dt_shrinkage * self.dt))) + + with cb.if_(self.t + self.dt, "==", self.t): + cb.raise_(TimeStepUnderflow) + with cb.else_(): + cb.fail_step() + + with cb.else_(): + # This updates :
should not be set before this is called. + self.finish_nonadaptive(cb, high_order_estimate, low_order_estimate, + rhss, time_data) + + cb(self.dt, + Min((0.9 * self.dt * rel_error + ** (-1 / (self.function_family.order + 1)), + self.max_dt_growth * self.dt))) + +# }}} + + # {{{ diagnostics class TimeStepUnderflow(RuntimeError): diff --git a/leap/multistep/__init__.py b/leap/multistep/__init__.py index 34dc88b..1a33aff 100644 --- a/leap/multistep/__init__.py +++ b/leap/multistep/__init__.py @@ -29,7 +29,7 @@ import numpy as np import numpy.linalg as la -from leap import MethodBuilder +from leap import MethodBuilder, OneOrderAdaptiveMethodBuilderMixin from pymbolic import var @@ -222,7 +222,7 @@ class AdamsMethodBuilder(MethodBuilder): """ def __init__(self, component_id, function_family=None, state_filter_name=None, - hist_length=None, static_dt=False, order=None, _extra_bootstrap=False): + hist_length=None, static_dt=False, order=None): """ :arg function_family: Accepts an instance of :class:`AdamsIntegrationFunctionFamily` @@ -250,7 +250,6 @@ def __init__(self, component_id, function_family=None, state_filter_name=None, self.hist_length = hist_length self.static_dt = static_dt - self.extra_bootstrap = _extra_bootstrap self.component_id = component_id @@ -307,7 +306,7 @@ def generate(self): component_id=self.component_id, time_id="", time=self.t) cb_bootstrap(self.step, self.step + 1) - bootstrap_length = self.determine_bootstrap_length() + bootstrap_length = self.hist_length with cb_bootstrap.if_(self.step, "==", bootstrap_length): cb_bootstrap.switch_phase("primary") @@ -339,32 +338,35 @@ def rotate_and_yield(self, cb, hist, time_hist): component_id=self.component_id, time_id="", time=self.t) - def set_up_time_history(self, cb, new_t): + def set_up_time_data(self, cb, new_t): from pytools import UniqueNameGenerator name_gen = UniqueNameGenerator() array = var("array") if not self.static_dt: - time_history_data = self.time_history + [new_t] - time_hist_var = var(name_gen("time_history")) - cb(time_hist_var, array(self.hist_length)) + time_data = self.time_history + [new_t] + time_data_var = var(name_gen("time_data")) + cb(time_data_var, array(self.hist_length)) for i in range(self.hist_length): - cb(time_hist_var[i], time_history_data[i] - self.t) + cb(time_data_var[i], time_data[i] - self.t) - time_hist = time_hist_var + relv_times = time_data_var t_end = self.dt dt_factor = 1 else: if new_t == self.t: - time_hist = list(range(-self.hist_length+1, 0+1)) # noqa pylint:disable=invalid-unary-operand-type - time_history_data = list(range(-self.hist_length+1, 0+1)) # noqa pylint:disable=invalid-unary-operand-type + relv_times = list(range(-self.hist_length+1, 0+1)) # noqa pylint:disable=invalid-unary-operand-type + time_data = list(range(-self.hist_length+1, 0+1)) # noqa pylint:disable=invalid-unary-operand-type else: - time_hist = list(range(-self.hist_length+2, 0+2)) # noqa pylint:disable=invalid-unary-operand-type - time_history_data = list(range(-self.hist_length+2, 0+2)) # noqa pylint:disable=invalid-unary-operand-type + # In implicit mode, the vector of times + # passed to adams_integration must + # include the *next* point in time. 
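As a sketch of what these two branches produce (assuming hist_length == 3 and a static timestep; this is illustration, not part of the patch): the explicit branch above yields the relative times [-2, -1, 0], while this implicit branch reaches one point past the current time so that the Adams-Moulton corrector can involve the still-unknown RHS at t + dt.

    hist_length = 3
    explicit_times = list(range(-hist_length + 1, 0 + 1))  # [-2, -1, 0]
    implicit_times = list(range(-hist_length + 2, 0 + 2))  # [-1, 0, 1]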
+ relv_times = list(range(-self.hist_length+2, 0+2)) # noqa pylint:disable=invalid-unary-operand-type + time_data = list(range(-self.hist_length+2, 0+2)) # noqa pylint:disable=invalid-unary-operand-type dt_factor = self.dt t_end = 1 - return time_history_data, time_hist, dt_factor, t_end + return time_data, relv_times, dt_factor, t_end def generate_primary(self, cb): raise NotImplementedError() @@ -372,9 +374,6 @@ def generate_primary(self, cb): def rk_bootstrap(self, cb): raise NotImplementedError() - def determine_bootstrap_length(self): - raise NotImplementedError() - # }}} @@ -387,7 +386,7 @@ def generate_primary(self, cb): name_gen = UniqueNameGenerator() time_history_data, time_hist, \ - dt_factor, t_end = self.set_up_time_history(cb, self.t) + dt_factor, t_end = self.set_up_time_data(cb, self.t) cb(rhs_var, self.eval_rhs(self.t, self.state)) history = self.history + [rhs_var] @@ -445,13 +444,6 @@ def rk_bootstrap(self, cb): # Assign the value of the new state. cb(self.state, est_vars[0]) - def determine_bootstrap_length(self): - - # In the explicit case, this is always - # equal to history length. - bootstrap_length = self.hist_length - - return bootstrap_length # }}} @@ -468,10 +460,11 @@ def generate_primary(self, cb): unkvar = cb.fresh_var("unk") rhs_var_to_unknown[rhs_next_var] = unkvar - # In implicit mode, the time history must + # In implicit mode, the vector of times + # passed to adams_integration must # include the *next* point in time. - time_history_data, time_hist, \ - dt_factor, t_end = self.set_up_time_history(cb, self.t + self.dt) + time_data, relv_times, \ + dt_factor, t_end = self.set_up_time_data(cb, self.t + self.dt) # Implicit setup - rhs_next_var is an unknown, needs implicit solve. equations = [] @@ -480,14 +473,16 @@ def generate_primary(self, cb): unknowns.add(rhs_next_var) - # Update history - history = self.history + [rhs_next_var] + # Create RHS vector for Adams setup, + # including RHS value to be implicitly + # solved for + rhss = self.history + [rhs_next_var] # Set up the actual Adams-Moulton step. am_sum = emit_adams_integration( cb, name_gen, self.function_family, - time_hist, history, + relv_times, rhss, 0, t_end) state_est = self.state + dt_factor * am_sum @@ -507,6 +502,14 @@ def generate_primary(self, cb): if unknowns and len(unknowns) == len(equations): from leap.implicit import generate_solve generate_solve(cb, unknowns, equations, rhs_var_to_unknown, self.state) + elif not unknowns: + raise ValueError("Adams-Moulton implicit timestep has no unknowns") + elif len(unknowns) > len(equations): + raise ValueError("Adams-Moulton implicit timestep has more unknowns " + "than equations") + elif len(unknowns) < len(equations): + raise ValueError("Adams-Moulton implicit timestep has more equations " + "than unknowns") del equations[:] knowns.update(unknowns) @@ -519,8 +522,9 @@ def generate_primary(self, cb): state_est = self.state_filter(state_est) cb(self.state, state_est) - # Rotate history and time history. - self.rotate_and_yield(cb, history, time_history_data) + # Add new RHS and time to history and rotate. 
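In plain-Python terms this amounts to keeping fixed-length windows of the most recent RHS values and time stamps (a sketch of the intent only; the code below delegates the actual work to rotate_and_yield, which emits the corresponding dagrt assignments, and t_new is a stand-in name for the appended time value):

    history = history[1:] + [rhs_next_var]       # drop the oldest f_n_minus_* slot, append the newest RHS
    time_history = time_history[1:] + [t_new]    # rotate the matching time stamps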
+ history = self.history + [rhs_next_var] + self.rotate_and_yield(cb, history, time_data) def rk_bootstrap(self, cb): """Initialize the timestepper with an IMPLICIT RK method.""" @@ -534,21 +538,6 @@ def rk_bootstrap(self, cb): estimate_coeff_sets = {"main": rk_coeffs} rhs_funcs = {"implicit": var(""+self.component_id)} - if self.extra_bootstrap: - first_save_step = 2 - else: - first_save_step = 1 - - with cb.if_(self.step, "==", first_save_step): - # Save the first RHS to the AM history - rhs_var = var("rhs_var") - - cb(rhs_var, self.eval_rhs(self.t, self.state)) - cb(self.history[0], rhs_var) - - if not self.static_dt: - cb(self.time_history[0], self.t) - # Traverse RK stage loop of appropriate order and update state. rk = rk_method(self.component_id, self.state_filter_name) cb = rk.generate_butcher_init(cb, stage_coeff_set_names, @@ -568,30 +557,261 @@ def rk_bootstrap(self, cb): cb(rhs_next_var, self.eval_rhs(self.t + self.dt, self.state)) - for i in range(1, len(self.history)): - if self.extra_bootstrap: - save_crit = i+1 - else: - save_crit = i + for i in range(len(self.history)): - with cb.if_(self.step, "==", save_crit): + with cb.if_(self.step, "==", i + 1): cb(self.history[i], rhs_next_var) if not self.static_dt: cb(self.time_history[i], self.t + self.dt) - def determine_bootstrap_length(self): - # In the implicit case, this is - # equal to history length - 1, unless - # we want an extra bootstrap step for - # comparison with explicit methods. - if self.extra_bootstrap: - bootstrap_length = self.hist_length +# }}} + + +# {{{ embedded method w/adaptivity + + +class EmbeddedAdamsMethodBuilder( + AdamsMethodBuilder, OneOrderAdaptiveMethodBuilderMixin): + """ + User-supplied context: + + component_id: The value that is integrated + + component_id: The right hand side function + """ + + def __init__(self, component_id, function_family=None, state_filter_name=None, + hist_length=None, static_dt=False, order=None, _extra_bootstrap=False, + use_high_order=False, atol=0, rtol=0, max_dt_growth=None, + min_dt_shrinkage=None): + """ + :arg function_family: Accepts an instance of + :class:`AdamsIntegrationFunctionFamily` + or an integer, in which case the classical monomial function family + with the order given by the integer is used. + :arg static_dt: If *True*, changing the timestep during time integration + is not allowed. + """ + + if function_family is not None and order is not None: + raise ValueError("may not specify both function_family and order") + + if function_family is None: + function_family = order + del order + + if isinstance(function_family, int): + function_family = AdamsMonomialIntegrationFunctionFamily(function_family) + + self.function_family = function_family + + if hist_length is None: + hist_length = len(function_family) + 1 + + # Check for reasonable history length. + if hist_length < len(function_family) + 1: + raise ValueError("Invalid history length specified for embedded Adams") + + self.hist_length = hist_length + + # If adaptivity is on, we can't have a static timestep. + if atol or rtol: + if static_dt is True: + raise ValueError("Can't have static timestepping with adaptivity") + + self.static_dt = static_dt + self.extra_bootstrap = _extra_bootstrap + + self.component_id = component_id + + # Declare variables + self.step = var("

step") + self.function = var("" + component_id) + self.history = \ + [var("

f_n_minus_" + str(i)) for i in range(hist_length - 1, 0, -1)] + + if not self.static_dt: + self.time_history = [ + var("

t_n_minus_" + str(i)) + for i in range(hist_length - 1, 0, -1)] + + self.state = var("" + component_id) + self.t = var("") + self.dt = var("

") + + self.state_filter_name = state_filter_name + if state_filter_name is not None: + self.state_filter = var("" + state_filter_name) else: - bootstrap_length = self.hist_length - 1 + self.state_filter = None + + OneOrderAdaptiveMethodBuilderMixin.__init__( + self, + atol=atol, + rtol=rtol, + max_dt_growth=max_dt_growth, + min_dt_shrinkage=min_dt_shrinkage) + + self.use_high_order = use_high_order + + def generate_primary(self, cb): + + from pytools import UniqueNameGenerator + name_gen = UniqueNameGenerator() + array = var("array") + rhs_next_var = var("rhs_next_var") + rhs_var_to_unknown = {} + unkvar = cb.fresh_var("unk") + rhs_var_to_unknown[rhs_next_var] = unkvar - return bootstrap_length + # In implicit mode, the vector of times + # passed to adams_integration must + # include the *next* point in time. + time_data, relv_times, \ + dt_factor, t_end = self.set_up_time_data(cb, self.t + self.dt) + + # Implicit setup - rhs_next_var is an unknown, needs implicit solve. + equations = [] + unknowns = set() + knowns = set() + + unknowns.add(rhs_next_var) + + # Create RHS vector for Adams setup, + # including RHS value to be implicitly + # solved for + rhss = self.history + [rhs_next_var] + + # Create data to feed to AB. + rhss_ab = rhss[:-1] + relv_times_ab_var = var(name_gen("times_ab")) + cb(relv_times_ab_var, array(self.hist_length-1)) + for i in range(self.hist_length-1): + cb(relv_times_ab_var[i], relv_times[i]) + + relv_times_ab = relv_times_ab_var + + # Create data to feed to AM. + rhss_am = rhss[1:] + relv_times_am_var = var(name_gen("times_am")) + cb(relv_times_am_var, array(self.hist_length-1)) + for i in range(self.hist_length-1): + cb(relv_times_am_var[i], relv_times[i+1]) + + relv_times_am = relv_times_am_var + + # Set up the actual Adams-Moulton step. + am_sum = emit_adams_integration( + cb, name_gen, + self.function_family, + relv_times_am, rhss_am, + 0, t_end) + + ab_sum = emit_adams_integration( + cb, name_gen, + self.function_family, + relv_times_ab, rhss_ab, + 0, t_end) + + state_est_pred = self.state + dt_factor * ab_sum + state_est_corr = self.state + dt_factor * am_sum + + # Build the implicit solve expression. + from dagrt.expression import collapse_constants + from pymbolic.mapper.distributor import DistributeMapper as DistMap + solve_expression = collapse_constants( + rhs_next_var - self.eval_rhs(self.t + self.dt, + DistMap()(state_est_corr)), + list(unknowns) + [self.state], + cb.assign, cb.fresh_var) + equations.append(solve_expression) + + # {{{ emit solve if possible + + if unknowns and len(unknowns) == len(equations): + from leap.implicit import generate_solve + generate_solve(cb, unknowns, equations, + rhs_var_to_unknown, state_est_pred) + elif not unknowns: + raise ValueError("Adaptive Adams implicit timestep has no unknowns") + elif len(unknowns) > len(equations): + raise ValueError("Adaptive Adams implicit timestep has more unknowns " + "than equations") + elif len(unknowns) < len(equations): + raise ValueError("Adaptive Adams implicit timestep has more equations " + "than unknowns") + + del equations[:] + knowns.update(unknowns) + unknowns.clear() + + # }}} + + # Update the state now that we've solved. + if self.state_filter is not None: + state_est_pred = self.state_filter(state_est_pred) + state_est_corr = self.state_filter(state_est_corr) + + # Finish needs to intervene here. 
+ self.finish(cb, state_est_corr, state_est_pred, rhss, time_data) + + def finish(self, cb, high_est, low_est, rhss, time_data): + if not self.adaptive: + cb(self.state, low_est) + # Rotate history and time history. + self.rotate_and_yield(cb, rhss, time_data) + else: + self.finish_adaptive(cb, high_est, low_est, rhss, time_data) + + def finish_nonadaptive(self, cb, high_order_estimate, + low_order_estimate, rhss, time_data): + if self.use_high_order: + est = high_order_estimate + else: + est = low_order_estimate + + cb(self.state, est) + # Rotate history and time history. + self.rotate_and_yield(cb, rhss, time_data) + + def rk_bootstrap(self, cb): + """Initialize the timestepper with an RK method.""" + + rhs_var = var("rhs_var") + + cb(rhs_var, self.eval_rhs(self.t, self.state)) + + # Save the current RHS to the AB history + + for i in range(len(self.history)): + with cb.if_(self.step, "==", i + 1): + cb(self.history[i], rhs_var) + + if not self.static_dt: + cb(self.time_history[i], self.t) + + from leap.rk import ORDER_TO_RK_METHOD_BUILDER + rk_method = ORDER_TO_RK_METHOD_BUILDER[self.function_family.order] + rk_coeffs = rk_method.output_coeffs + stage_coeff_set_names = ("explicit",) + stage_coeff_sets = {"explicit": rk_method.a_explicit} + estimate_coeff_set_names = ("main",) + estimate_coeff_sets = {"main": rk_coeffs} + rhs_funcs = {"explicit": var(""+self.component_id)} + + # Traverse RK stage loop of appropriate order and update state. + rk = rk_method(self.component_id, self.state_filter_name) + cb = rk.generate_butcher_init(cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets) + cb, rhss, est_vars = rk.generate_butcher_primary(cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets) + + # Assign the value of the new state. 
+ cb(self.state, est_vars[0]) # }}} diff --git a/leap/multistep/multirate/__init__.py b/leap/multistep/multirate/__init__.py index 092de91..374b931 100644 --- a/leap/multistep/multirate/__init__.py +++ b/leap/multistep/multirate/__init__.py @@ -293,7 +293,7 @@ def __init__(self, default_order, system_description, if state_filter_names is None: state_filter_names = {} - for comp_name, sfname in state_filter_names.items(): + for comp_name, _sfname in state_filter_names.items(): if comp_name not in component_names: raise ValueError(f"component name '{comp_name}' in " "'state_filter_names' not known") @@ -425,7 +425,7 @@ def make_stage_history(prefix): if not self.is_ode_component[comp_name]: continue - for irhs, rhs in enumerate(component_rhss): + for irhs in range(len(component_rhss)): stage_rhss[comp_name, irhs] = make_stage_history( "{name_prefix}_rk_{comp_name}_rhs{irhs}" .format( @@ -441,21 +441,20 @@ def make_stage_history(prefix): if not self.is_ode_component[comp_name]: continue - for irhs, rhs in enumerate(component_rhss): + for irhs in range(len(component_rhss)): cb(stage_rhss[comp_name, irhs][istage], rhss_on_entry[comp_name, irhs]) else: component_state_ests = {} - for icomp, (comp_name, component_rhss) in enumerate( - zip(self.component_names, self.rhss)): - + for comp_name, component_rhss in zip( + self.component_names, self.rhss): if not self.is_ode_component[comp_name]: continue contribs = [] - for irhs, rhs in enumerate(component_rhss): + for irhs in range(len(component_rhss)): state_contrib_var = var( name_gen( "state_contrib_{comp_name}_rhs{irhs}" @@ -492,10 +491,10 @@ def make_stage_history(prefix): contribs = [] - for irhs, rhs in enumerate(component_rhss): + for rhs in component_rhss: kwargs = { self.comp_name_to_kwarg_name[arg_comp_name]: - component_state_ests[arg_comp_name] + component_state_ests[arg_comp_name] for arg_comp_name in rhs.arguments} contribs.append(var(rhs.func_name)( @@ -524,7 +523,7 @@ def make_stage_history(prefix): for irhs, rhs in enumerate(component_rhss): kwargs = { self.comp_name_to_kwarg_name[arg_comp_name]: - component_state_ests[arg_comp_name] + component_state_ests[arg_comp_name] for arg_comp_name in rhs.arguments} cb(stage_rhss[comp_name, irhs][istage], var(rhs.func_name)( @@ -535,11 +534,10 @@ def make_stage_history(prefix): component_state_ests = {} - for icomp, (comp_name, component_rhss) in enumerate( - zip(self.component_names, self.rhss)): + for comp_name, component_rhss in zip(self.component_names, self.rhss): contribs = [] - for irhs, rhs in enumerate(component_rhss): + for irhs in range(len(component_rhss)): if not self.is_ode_component[comp_name]: continue @@ -619,7 +617,7 @@ def emit_rk_bootstrap(self, cb): kwargs = { self.comp_name_to_kwarg_name[arg_comp_name]: - var("" + arg_comp_name) + var("" + arg_comp_name) for arg_comp_name in rhs.arguments} cb(rhs_var, var(rhs.func_name)(t=self.t, **kwargs)) @@ -659,7 +657,7 @@ def get_state(comp_name): kwargs = { self.comp_name_to_kwarg_name[arg_comp_name]: - get_state(arg_comp_name) + get_state(arg_comp_name) for arg_comp_name in rhs.arguments} cb(rhs_var, var(rhs.func_name)(t=self.t, **kwargs)) @@ -906,7 +904,7 @@ def update_hist(comp_idx, irhs, isubstep): kwargs = { self.comp_name_to_kwarg_name[arg_comp_name]: - get_state(arg_comp_name, isubstep) + get_state(arg_comp_name, isubstep) for arg_comp_name in rhs.arguments} # }}} @@ -945,7 +943,7 @@ def update_hist(comp_idx, irhs, isubstep): for other_comp_name, other_component_rhss in zip( self.component_names, self.rhss): do_invalidate = 
False - for other_rhs in enumerate(other_component_rhss): + for _other_rhs in enumerate(other_component_rhss): if comp_name in rhs.arguments: do_invalidate = True break @@ -971,13 +969,12 @@ def norm(expr): def check_history_consistency(): # At the start of a macrostep, ensure that the last computed # RHS history corresponds to the current state - for comp_idx, (comp_name, component_rhss) in enumerate( - zip(self.component_names, self.rhss)): + for comp_name, component_rhss in zip(self.component_names, self.rhss): for irhs, rhs in enumerate(component_rhss): t_expr = self.t kwargs = { self.comp_name_to_kwarg_name[arg_comp_name]: - get_state(arg_comp_name, 0) + get_state(arg_comp_name, 0) for arg_comp_name in rhs.arguments} test_rhs_var = var( name_gen( diff --git a/leap/rk/__init__.py b/leap/rk/__init__.py index 550891c..f8a32e5 100644 --- a/leap/rk/__init__.py +++ b/leap/rk/__init__.py @@ -331,6 +331,15 @@ def make_known(v): from leap.implicit import generate_solve generate_solve(cb, unknowns, equations, rhs_var_to_unknown, self.state) + elif not unknowns: + # we have an explicit Runge-Kutta method + pass + elif len(unknowns) > len(equations): + raise ValueError("Runge-Kutta implicit timestep has more " + "unknowns than equations") + elif len(unknowns) < len(equations): + raise ValueError("Runge-Kutta implicit timestep has more " + "equations than unknowns") del equations[:] knowns.update(unknowns) @@ -717,7 +726,7 @@ class DIRK5MethodBuilder(ImplicitButcherTableauMethodBuilder): 2: DIRK2MethodBuilder, 3: DIRK3MethodBuilder, 4: DIRK4MethodBuilder, - 5: DIRK4MethodBuilder, + 5: DIRK5MethodBuilder, } # }}} diff --git a/leap/step_matrix.py b/leap/step_matrix.py index 49ab7a3..9b0afac 100644 --- a/leap/step_matrix.py +++ b/leap/step_matrix.py @@ -107,10 +107,12 @@ def _get_state_variables(self): VectorComponent = namedtuple("VectorComponent", "name, index") - def run_symbolic_step(self, phase_name, shapes={}): + def run_symbolic_step(self, phase_name, shapes=None): """ `shapes` maps variable names to vector lengths. """ + if shapes is None: + shapes = {} phase = self.code.phases[phase_name] from pymbolic import var @@ -139,12 +141,14 @@ def run_symbolic_step(self, phase_name, shapes={}): self.exec_controller.reset() self.exec_controller.update_plan(phase, phase.depends_on) - for event in self.exec_controller(phase, self): + for _event in self.exec_controller(phase, self): pass return components, initial_vals - def get_maxima_expressions(self, phase_name, shapes={}): + def get_maxima_expressions(self, phase_name, shapes=None): + if shapes is None: + shapes = {} components, initial_vals = self.run_symbolic_step(phase_name, shapes) lines = [] @@ -166,7 +170,7 @@ def msm_expr_list(name, exprs): msm_expr_list("initial", initial_vals) exprs = [] - for i, v in enumerate(components): + for v in components: # Get the expression for v. if isinstance(v, self.VectorComponent): expr = self.context[v.name][v.index] @@ -179,7 +183,7 @@ def msm_expr_list(name, exprs): return "\n".join(lines) - def get_phase_step_matrix(self, phase_name, shapes={}, sparse=False): + def get_phase_step_matrix(self, phase_name, shapes=None, sparse=False): """ `shapes` maps variable names to vector lengths. @@ -188,6 +192,9 @@ def get_phase_step_matrix(self, phase_name, shapes={}, sparse=False): Otherwise returns a numpy object array. 
""" + if shapes is None: + shapes = {} + components, initial_vals = self.run_symbolic_step(phase_name, shapes) from pymbolic.mapper.differentiator import DifferentiationMapper diff --git a/leap/version.py b/leap/version.py index 5cea885..30c9629 100644 --- a/leap/version.py +++ b/leap/version.py @@ -1,2 +1,2 @@ -VERSION = (2020, 1) +VERSION = (2021, 1) VERSION_TEXT = ".".join(str(i) for i in VERSION) diff --git a/requirements.txt b/requirements.txt index 731a94b..de5cd4e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ -git+https://github.com/inducer/pytools.git -git+https://github.com/inducer/pymbolic.git -git+https://github.com/inducer/dagrt.git +git+https://github.com/inducer/pytools.git#egg=pytools +git+https://github.com/inducer/pymbolic.git#egg=pymbolic +git+https://github.com/inducer/dagrt.git#egg=dagrt diff --git a/setup.cfg b/setup.cfg index bb1aa21..bec278b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,7 +1,9 @@ [flake8] -ignore = E126,E127,E128,E123,E131,E226,E241,E242,E261,E265,N802,W503,E402 +ignore = E126,E127,E128,E123,E226,E241,E242,E261,E265,N802,W503,E402 max-line-length=85 inline-quotes = " docstring-quotes = """ multiline-quotes = """ + +# enable-flake8-bugbear diff --git a/setup.py b/setup.py index 22ad820..c78f3ba 100644 --- a/setup.py +++ b/setup.py @@ -6,47 +6,45 @@ def main(): version_dict = {} init_filename = "leap/version.py" - exec(compile(open(init_filename).read(), init_filename, "exec"), - version_dict) - - setup(name="leap", - version=version_dict["VERSION_TEXT"], - description="Time integration by code generation", - long_description=open("README.rst").read(), - author="Andreas Kloeckner", - author_email="inform@tiker.net", - license="MIT", - url="http://wiki.tiker.net/Leap", - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Intended Audience :: Other Audience', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: MIT License', - 'Natural Language :: English', - 'Programming Language :: Python', - 'Programming Language :: Python :: 3', - 'Topic :: Scientific/Engineering', - 'Topic :: Scientific/Engineering :: Information Analysis', - 'Topic :: Scientific/Engineering :: Mathematics', - 'Topic :: Scientific/Engineering :: Visualization', - 'Topic :: Software Development :: Libraries', - 'Topic :: Utilities', - ], - - packages=find_packages(), - - python_requires="~=3.6", - install_requires=[ - "numpy>=1.5", - "pytools>=2014.1", - "pymbolic>=2014.1", - "pytest>=2.3", - "dagrt>=2019.4", - "mako", - ], - ) - - -if __name__ == '__main__': + exec(compile(open(init_filename).read(), init_filename, "exec"), version_dict) + + setup( + name="leap", + version=version_dict["VERSION_TEXT"], + description="Time integration by code generation", + long_description=open("README.rst").read(), + author="Andreas Kloeckner", + author_email="inform@tiker.net", + license="MIT", + url="https://documen.tician.de/leap", + classifiers=[ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Other Audience", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: MIT License", + "Natural Language :: English", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Information Analysis", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Scientific/Engineering :: Visualization", + "Topic :: Software Development :: Libraries", + "Topic :: 
Utilities", + ], + packages=find_packages(), + python_requires="~=3.6", + install_requires=[ + "numpy>=1.5", + "pytools>=2014.1", + "pymbolic>=2014.1", + "pytest>=2.3", + "dagrt>=2019.4", + "mako", + ], + ) + + +if __name__ == "__main__": main() diff --git a/test/test_embedded_adams.py b/test/test_embedded_adams.py new file mode 100755 index 0000000..4376ac9 --- /dev/null +++ b/test/test_embedded_adams.py @@ -0,0 +1,229 @@ +#! /usr/bin/env python + +__copyright__ = "Copyright (C) 2014 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +# avoid spurious: pytest.mark.parametrize is not callable +# pylint: disable=not-callable + +import sys +import pytest + +from leap.multistep import EmbeddedAdamsMethodBuilder +import numpy as np + +import logging + +from utils import ( # noqa + python_method_impl_interpreter as pmi_int, + python_method_impl_codegen as pmi_cg) + +logger = logging.getLogger(__name__) + + +# {{{ non-adaptive test + +@pytest.mark.parametrize(("method", "expected_order"), [ + (EmbeddedAdamsMethodBuilder("y", order=2, use_high_order=False), 2), + (EmbeddedAdamsMethodBuilder("y", order=3, use_high_order=False), 3), + (EmbeddedAdamsMethodBuilder("y", order=4, use_high_order=False), 4), + ]) +def test_embedded_accuracy(python_method_impl, method, expected_order, + show_dag=False, plot_solution=False): + from utils import check_simple_convergence + check_simple_convergence(method=method, method_impl=python_method_impl, + expected_order=expected_order, show_dag=show_dag, + plot_solution=plot_solution, implicit=True) + +# }}} + +# {{{ adaptive test + + +def solver(f, t, sub_y, coeff, guess): + from scipy.optimize import root + return root(lambda unk: unk - f(t=t, y=sub_y + coeff*unk), guess).x + + +def solver_hook(solve_expr, solve_var, solver_id, guess): + from dagrt.expression import match, substitute + + pieces = match("unk - rhs(t=t, y=sub_y + coeff*unk)", solve_expr, + pre_match={"unk": solve_var}) + pieces["guess"] = guess + return substitute("solver(t, sub_y, coeff, guess)", pieces) + + +@pytest.mark.parametrize(("method", "ss_frac", "bs_frac"), [ + (EmbeddedAdamsMethodBuilder("y", order=2, rtol=1e-6), 0.5, 0.05), + (EmbeddedAdamsMethodBuilder("y", order=3, rtol=1e-6), 0.5, 0.01), + (EmbeddedAdamsMethodBuilder("y", order=4, rtol=1e-6), 0.8, 0.0004), + ]) +def test_adaptive_timestep(python_method_impl, method, ss_frac, bs_frac, + show_dag=False, plot=False): + from utils import check_adaptive_timestep + 
check_adaptive_timestep(python_method_impl=python_method_impl, method=method, + ss_frac=ss_frac, bs_frac=bs_frac, show_dag=show_dag, + plot=plot, implicit=True) + + +@pytest.mark.parametrize(("method", "expected_order"), [ + (EmbeddedAdamsMethodBuilder("y", order=2, rtol=1e-6), 2), + (EmbeddedAdamsMethodBuilder("y", order=3, rtol=1e-6), 3), + (EmbeddedAdamsMethodBuilder("y", order=4, rtol=1e-6), 4), + ]) +def test_adaptive_accuracy(method, expected_order, show_dag=False, + plot=False, python_method_impl=pmi_cg): + # Use "DEBUG" to trace execution + logging.basicConfig(level=logging.INFO) + + component_id = method.component_id + code = method.generate() + + from leap.implicit import replace_AssignImplicit + code = replace_AssignImplicit(code, {"solve": solver_hook}) + + code_nonadapt = EmbeddedAdamsMethodBuilder("y", order=expected_order).generate() + code_nonadapt = replace_AssignImplicit(code_nonadapt, {"solve": solver_hook}) + + if show_dag: + from dagrt.language import show_dependency_graph + show_dependency_graph(code) + + from stiff_test_systems import VanDerPolProblem + example = VanDerPolProblem() + y = example.initial() + + from functools import partial + interp = python_method_impl(code, + function_map={"" + component_id: example, + "solver": partial(solver, example)}) + + interp_nonadapt = python_method_impl(code_nonadapt, + function_map={"" + component_id: example, + "solver": partial(solver, example)}) + + interp.set_up(t_start=example.t_start, dt_start=1e-5, context={component_id: y}) + + times = [] + values = [] + + new_times = [] + new_values = [] + + last_t = 0 + step_sizes = [] + nsteps = [] + istep = 0 + + # Initial run to establish step sizes. + for event in interp.run(t_end=example.t_end/10.0): + if isinstance(event, interp.StateComputed): + assert event.component_id == component_id + + new_values.append(event.state_component) + new_times.append(event.t) + elif isinstance(event, interp.StepCompleted): + if not new_times: + continue + + istep += 1 + step_sizes.append(event.t - last_t) + last_t = event.t + + times.extend(new_times) + values.extend(new_values) + del new_times[:] + del new_values[:] + elif isinstance(event, interp.StepFailed): + del new_times[:] + del new_values[:] + + logger.info("failed step at t=%s" % event.t) + + times = np.array(times) + values = np.array(values) + step_sizes = np.array(step_sizes) + nsteps = len(step_sizes) + final_time = times[-1] + final_val = values[-1] + end_vals = [] + end_vals.append(final_val) + dts = [] + dts.append(10.0/nsteps) + from pytools.convergence import EOCRecorder + eocrec = EOCRecorder() + for i in range(1, 5): + fac = 2**i + nsteps_run = fac*nsteps + dt = np.zeros(nsteps_run) + + for j in range(0, nsteps_run, fac): + dt[j] = step_sizes[int(j/fac)]/fac + for k in range(1, fac): + dt[j+k] = step_sizes[int(j/fac)]/fac + + # Now that we have our new set of timesteps, do the run, + # same as before, but with adaptivity turned off. 
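The index manipulation above amounts to the following compact sketch (illustrative; it builds a list rather than the numpy array used here): every entry of the adaptive run's step-size profile is split into fac equal substeps, so each non-adaptive rerun traces the same profile at 2x, 4x, 8x and 16x resolution.

    fac = 2 ** i
    dt = [h / fac for h in step_sizes for _ in range(fac)]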
+ interp_nonadapt.set_up(t_start=example.t_start, dt_start=dt[0], + context={component_id: y}) + iout = 1 + for event in interp_nonadapt.run(t_end=final_time): + if isinstance(event, interp_nonadapt.StateComputed): + assert event.component_id == component_id + + end_val = event.state_component + elif isinstance(event, interp_nonadapt.StepCompleted): + if iout < nsteps*fac: + if event.t + dt[iout] >= final_time: + interp_nonadapt.dt = final_time - event.t + else: + interp_nonadapt.dt = dt[iout] + iout += 1 + else: + interp_nonadapt.dt = final_time - event.t + + end_vals.append(end_val) + dts.append(10.0/iout) + + # Now calculate errors using the final time as the + # true solution (self-convergence) + for i in range(1, 5): + eocrec.add_data_point(dts[i-1], + np.linalg.norm(end_vals[i-1] - end_vals[-1])) + + print(eocrec.pretty_print()) + orderest = eocrec.estimate_order_of_convergence()[0, 1] + assert orderest > 0.9 * expected_order + + +# }}} + + +if __name__ == "__main__": + if len(sys.argv) > 1: + exec(sys.argv[1]) + else: + from pytest import main + main([__file__]) + +# vim: filetype=pyopencl:fdm=marker diff --git a/test/test_multirate.py b/test/test_multirate.py index 8ebd7ae..a90a999 100644 --- a/test/test_multirate.py +++ b/test/test_multirate.py @@ -321,7 +321,7 @@ def rhs_slow(t, fast, slow): @pytest.mark.parametrize("method_name", ["F", "Fqsr", "Srsf", "S"]) def test_2rab_scheme_explainers(method_name, order=3, step_ratio=3, - explainer=TextualSchemeExplainer()): + explainer=TextualSchemeExplainer()): # noqa: B008 method = TwoRateAdamsBashforthMethodBuilder( method_name, order=order, step_ratio=step_ratio) @@ -330,7 +330,7 @@ def test_2rab_scheme_explainers(method_name, order=3, step_ratio=3, def test_mrab_scheme_explainers(order=3, step_ratio=3, - explainer=TextualSchemeExplainer()): + explainer=TextualSchemeExplainer()): # noqa: B008 method = MultiRateMultiStepMethodBuilder( order, ( @@ -350,7 +350,7 @@ def test_mrab_scheme_explainers(order=3, step_ratio=3, def test_mrab_with_derived_state_scheme_explainers(order=3, step_ratio=3, - explainer=TextualSchemeExplainer()): + explainer=TextualSchemeExplainer()): # noqa: B008 method = MultiRateMultiStepMethodBuilder( order, ( @@ -463,7 +463,7 @@ def true_s(t): s_times.append(event.t) s_values.append(event.state_component) else: - assert False, event.component_id + raise ValueError(event.component_id) f_times = np.array(f_times) s_times = np.array(s_times) @@ -558,7 +558,7 @@ def true_s(t): s_times.append(event.t) s_values.append(event.state_component) else: - assert False, event.component_id + raise ValueError(event.component_id) f_times = np.array(f_times) s_times = np.array(s_times) diff --git a/test/test_rk.py b/test/test_rk.py index 6786476..a76c41b 100755 --- a/test/test_rk.py +++ b/test/test_rk.py @@ -35,9 +35,11 @@ RK3MethodBuilder, RK4MethodBuilder, RK5MethodBuilder, LSRK4MethodBuilder, SSPRK22MethodBuilder, SSPRK33MethodBuilder, + BackwardEulerMethodBuilder, DIRK2MethodBuilder, + DIRK3MethodBuilder, DIRK4MethodBuilder, + DIRK5MethodBuilder, ) from leap.rk.imex import KennedyCarpenterIMEXARK4MethodBuilder -import numpy as np import logging @@ -77,6 +79,21 @@ def test_rk_accuracy(python_method_impl, method, expected_order, expected_order=expected_order, show_dag=show_dag, plot_solution=plot_solution) + +@pytest.mark.parametrize(("method", "expected_order"), [ + (BackwardEulerMethodBuilder("y"), 1), + (DIRK2MethodBuilder("y"), 2), + (DIRK3MethodBuilder("y"), 3), + (DIRK4MethodBuilder("y"), 4), + (DIRK5MethodBuilder("y"), 5), + 
+    ])
+def test_implicit_rk_accuracy(python_method_impl, method, expected_order,
+        show_dag=False, plot_solution=False):
+    from utils import check_simple_convergence
+    check_simple_convergence(method=method, method_impl=python_method_impl,
+            expected_order=expected_order, show_dag=show_dag,
+            plot_solution=plot_solution, implicit=True)
+
 # }}}
@@ -90,77 +107,10 @@ def test_rk_accuracy(python_method_impl, method, expected_order,
     ])
 def test_adaptive_timestep(python_method_impl, method, show_dag=False,
         plot=False):
-    # Use "DEBUG" to trace execution
-    logging.basicConfig(level=logging.INFO)
-
-    component_id = method.component_id
-    code = method.generate()
-    print(code)
-    #1/0
-
-    if show_dag:
-        from dagrt.language import show_dependency_graph
-        show_dependency_graph(code)
-
-    from stiff_test_systems import VanDerPolProblem
-    example = VanDerPolProblem()
-    y = example.initial()
-
-    interp = python_method_impl(code,
-            function_map={"" + component_id: example})
-    interp.set_up(t_start=example.t_start, dt_start=1e-5, context={component_id: y})
-
-    times = []
-    values = []
-
-    new_times = []
-    new_values = []
-
-    last_t = 0
-    step_sizes = []
-
-    for event in interp.run(t_end=example.t_end):
-        if isinstance(event, interp.StateComputed):
-            assert event.component_id == component_id
-
-            new_values.append(event.state_component)
-            new_times.append(event.t)
-        elif isinstance(event, interp.StepCompleted):
-            if not new_times:
-                continue
-
-            step_sizes.append(event.t - last_t)
-            last_t = event.t
-
-            times.extend(new_times)
-            values.extend(new_values)
-            del new_times[:]
-            del new_values[:]
-        elif isinstance(event, interp.StepFailed):
-            del new_times[:]
-            del new_values[:]
-
-            logger.info("failed step at t=%s" % event.t)
-
-    times = np.array(times)
-    values = np.array(values)
-    step_sizes = np.array(step_sizes)
-
-    if plot:
-        import matplotlib.pyplot as pt
-        pt.plot(times, values[:, 1], "x-")
-        pt.show()
-        pt.plot(times, step_sizes, "x-")
-        pt.show()
-
-    step_sizes = np.array(step_sizes)
-    small_step_frac = len(np.nonzero(step_sizes < 0.01)[0]) / len(step_sizes)
-    big_step_frac = len(np.nonzero(step_sizes > 0.05)[0]) / len(step_sizes)
-
-    print("small_step_frac (<0.01): %g - big_step_frac (>.05): %g"
-            % (small_step_frac, big_step_frac))
-    assert small_step_frac <= 0.35, small_step_frac
-    assert big_step_frac >= 0.16, big_step_frac
+    from utils import check_adaptive_timestep
+    check_adaptive_timestep(python_method_impl=python_method_impl, method=method,
+            ss_frac=0.35, bs_frac=0.16, show_dag=show_dag,
+            plot=plot, implicit=False)
 
 
 # }}}
diff --git a/test/test_step_matrix.py b/test/test_step_matrix.py
index 5580077..f6ce33e 100755
--- a/test/test_step_matrix.py
+++ b/test/test_step_matrix.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 
 __copyright__ = "Copyright (C) 2014 Andreas Kloeckner, Matt Wala"
 
@@ -85,14 +85,14 @@ def rhs(t, y):
     interp.set_up(t_start=0, dt_start=dt, context={component_id: 15})
 
     assert interp.next_phase == "initial"
-    for event in interp.run_single_step():
+    for _event in interp.run_single_step():
         pass
 
     assert interp.next_phase == "primary"
     start_values = np.array(
             [interp.context[v] for v in finder.variables])
 
-    for event in interp.run_single_step():
+    for _event in interp.run_single_step():
         pass
 
     assert interp.next_phase == "primary"
diff --git a/test/utils.py b/test/utils.py
index 4f22cfc..14a5c69 100644
--- a/test/utils.py
+++ b/test/utils.py
@@ -21,6 +21,9 @@
 """
 
 import numpy as np
+import logging
+
+logger = logging.getLogger(__name__)
 
 
 # {{{ things to pass for python_method_impl
@@ -52,8 +55,10 @@ def solver_hook(solve_expr, solve_var, solver_id, guess):
     return substitute("solver(t, sub_y, coeff, guess)", pieces)
 
 
-def execute_and_return_single_result(python_method_impl, code, initial_context={},
+def execute_and_return_single_result(python_method_impl, code, initial_context=None,
         max_steps=1):
+    if initial_context is None:
+        initial_context = {}
     interpreter = python_method_impl(code, function_map={})
     interpreter.set_up(t_start=0, dt_start=0, context=initial_context)
     has_state_component = False
@@ -108,11 +113,14 @@ def __call__(self, t, y):
 
 
 def check_simple_convergence(method, method_impl, expected_order,
-        problem=DefaultProblem(), dts=_default_dts,
+        problem=None, dts=_default_dts,
         show_dag=False, plot_solution=False, implicit=False):
+    if problem is None:
+        problem = DefaultProblem()
+
     component_id = method.component_id
     code = method.generate()
-    print(code)
+    #print(code)
 
     if show_dag:
         from dagrt.language import show_dependency_graph
@@ -170,7 +178,93 @@ def check_simple_convergence(method, method_impl, expected_order,
 
     print(eocrec.pretty_print())
     orderest = eocrec.estimate_order_of_convergence()[0, 1]
-    assert orderest > expected_order * 0.9
+    assert orderest > expected_order * 0.89
+
+
+def check_adaptive_timestep(python_method_impl, method, ss_frac, bs_frac,
+        show_dag=False, plot=False, implicit=False):
+    # Use "DEBUG" to trace execution
+    logging.basicConfig(level=logging.INFO)
+
+    component_id = method.component_id
+    code = method.generate()
+    #print(code)
+    #1/0
+
+    if implicit:
+        from leap.implicit import replace_AssignImplicit
+        code = replace_AssignImplicit(code, {"solve": solver_hook})
+
+    if show_dag:
+        from dagrt.language import show_dependency_graph
+        show_dependency_graph(code)
+
+    from stiff_test_systems import VanDerPolProblem
+    example = VanDerPolProblem()
+    y = example.initial()
+
+    if implicit:
+        from functools import partial
+        interp = python_method_impl(code,
+                function_map={"" + component_id: example,
+                    "solver": partial(solver, example)})
+    else:
+        interp = python_method_impl(code,
+                function_map={"" + component_id: example})
+    interp.set_up(t_start=example.t_start, dt_start=1e-5, context={component_id: y})
+
+    times = []
+    values = []
+
+    new_times = []
+    new_values = []
+
+    last_t = 0
+    step_sizes = []
+
+    for event in interp.run(t_end=example.t_end):
+        if isinstance(event, interp.StateComputed):
+            assert event.component_id == component_id
+
+            new_values.append(event.state_component)
+            new_times.append(event.t)
+        elif isinstance(event, interp.StepCompleted):
+            if not new_times:
+                continue
+
+            step_sizes.append(event.t - last_t)
+            last_t = event.t
+
+            times.extend(new_times)
+            values.extend(new_values)
+            del new_times[:]
+            del new_values[:]
+        elif isinstance(event, interp.StepFailed):
+            del new_times[:]
+            del new_values[:]
+
+            logger.info("failed step at t=%s" % event.t)
+
+    times = np.array(times)
+    values = np.array(values)
+    step_sizes = np.array(step_sizes)
+
+    if plot:
+        import matplotlib.pyplot as pt
+        pt.clf()
+        pt.plot(times, values[:, 1], "x-")
+        pt.show()
+        pt.plot(times, step_sizes, "x-")
+        pt.show()
+
+    step_sizes = np.array(step_sizes)
+    small_step_frac = len(np.nonzero(step_sizes < 0.01)[0]) / len(step_sizes)
+    big_step_frac = len(np.nonzero(step_sizes > 0.05)[0]) / len(step_sizes)
+
+    print("small_step_frac (<0.01): %g - big_step_frac (>.05): %g"
+            % (small_step_frac, big_step_frac))
+    assert small_step_frac <= ss_frac, small_step_frac
+    assert big_step_frac >= bs_frac, big_step_frac
 
 
 # vim: foldmethod=marker