diff --git a/.claude/claude.md b/.claude/claude.md index 6bd3e54..0e38145 100644 --- a/.claude/claude.md +++ b/.claude/claude.md @@ -45,7 +45,6 @@ src/ add_jump_expressions.jl # JuMP expression builders add_param_container.jl # Parameter container builders add_constraint_dual.jl # Constraint & dual helpers - add_pwl_methods.jl # Piecewise-linear variable/constraint methods constraint_helpers.jl # Generic constraint utilities range_constraint.jl # Min/max range constraints duration_constraints.jl # Min up/down time constraints @@ -82,6 +81,17 @@ src/ initial_conditions_update_in_memory_store.jl model_numerical_analysis_utils.jl optimization_debugging.jl # Debug/diagnostic tools + quadratic_approximations/ # PWL approximation of x² (univariate) + pwl_utils.jl # Shared breakpoint generator, _square helper + incremental.jl # Incremental PWL formulation (δ/z variables) + solver_sos2.jl # Solver-native SOS2 quadratic approximation + manual_sos2.jl # Manual SOS2 (binary adjacency) quadratic approximation + sawtooth.jl # Sawtooth relaxation approximation + epigraph.jl # Epigraph-based approximation + bilinear_approximations/ # Approximation of bilinear products x·y + mccormick.jl # McCormick envelopes for bilinear terms + bilinear.jl # Bin2 separable bilinear approximation + hybs.jl # HybS hybrid separable approximation utils/ # General-purpose utilities jump_utils.jl # JuMP helper functions dataframes_utils.jl # DataFrame manipulation @@ -142,6 +152,12 @@ scripts/formatter/ # Code formatting (JuliaFormatter) concrete formulations call into. - **`src/objective_function/`** translates cost curves into JuMP objective terms. Each cost curve type has its own file. +- **`src/quadratic_approximations/`** implements PWL approximation methods for x²: + SOS2 (solver and manual), sawtooth, epigraph, plus the incremental formulation + and shared breakpoint utilities. 
+- **`src/bilinear_approximations/`** implements approximation methods for bilinear + products x·y: Bin2 separable decomposition, HybS hybrid relaxation, and McCormick + envelopes. - **`src/operation/`** implements `DecisionModel` and `EmulationModel` — the two main model types — plus serialization, output stores, and the problem template. - **`src/utils/`** is for pure utility functions with no domain coupling. diff --git a/docs/make.jl b/docs/make.jl index 5dcc870..92924c3 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -20,7 +20,8 @@ pages = OrderedDict( "Developers" => ["Developer Guidelines" => "reference/developer_guidelines.md", "Internals" => "reference/internal.md"], "Public API" => "reference/public.md", - "Quadratic Approximations" => "reference/quadratic_approximations.md", + "Piecewise Linear Cost Functions" => "explanation/piecewise_linear_cost_functions.md", + "Quadratic Approximations" => "explanation/quadratic_approximations.md", ], ) diff --git a/docs/src/explanation/piecewise_linear_cost_functions.md b/docs/src/explanation/piecewise_linear_cost_functions.md new file mode 100644 index 0000000..e56982c --- /dev/null +++ b/docs/src/explanation/piecewise_linear_cost_functions.md @@ -0,0 +1,188 @@ +# Piecewise Linear Cost Functions + +```@meta +CurrentModule = InfrastructureOptimizationModels +``` + +This package provides two formulations for representing piecewise linear (PWL) cost +functions in optimization models. Both formulations express an operating point and its +cost as a point on a piecewise linear curve connecting breakpoints +``(P_0, C_0), (P_1, C_1), \ldots, (P_n, C_n)``, but they approach the problem differently. + +## Lambda Formulation (Convex Combination) + +Used by `CostCurve{PiecewisePointCurve}` and `FuelCurve{PiecewisePointCurve}`. + +The lambda formulation assigns a weight ``\lambda_i`` to each **breakpoint**. The +operating point and cost are expressed as a weighted average of the breakpoint values. 
+ +### Formulation + +Given ``n + 1`` breakpoints with power levels ``P_0, \ldots, P_n`` and costs +``C(P_0), \ldots, C(P_n)``: + +**Variables:** + +```math +\lambda_i \in [0, 1], \quad i = 0, 1, \ldots, n +``` + +**Constraints:** + +```math +\begin{aligned} +p &= \sum_{i=0}^{n} \lambda_i \, P_i && \text{(linking)} \\ +C(p) &= \sum_{i=0}^{n} \lambda_i \, C(P_i) && \text{(cost)} \\ +\sum_{i=0}^{n} \lambda_i &= u && \text{(normalization, } u \text{ = on-status)} \\ +\text{at most two adjacent } &\lambda_i \text{ may be nonzero} && \text{(adjacency)} +\end{aligned} +``` + +### Adjacency Enforcement + +The adjacency condition keeps the operating point on a single line segment. For +**convex** cost curves (where each segment is more expensive than the last), the +optimizer naturally satisfies this condition — no extra constraints are needed. + +For **non-convex** cost curves, an +[SOS2 constraint](https://en.wikipedia.org/wiki/Special_ordered_set) is added to +enforce adjacency explicitly. This requires solver support for SOS2, and effectively +introduces additional branching in the solver. 
+ +### Compact Form + +When the power variable represents output above minimum generation +(PowerAboveMinimumVariable), a compact variant adjusts the linking constraint to +include a ``P_{\min}`` offset: + +```math +\sum_{i=0}^{n} \lambda_i \, P_i = p + P_{\min} \cdot u +``` + +### Variables and Constraints + +| Container Type | Description | +|:---------------------------------------------------- |:-------------------------------------------- | +| [`PiecewiseLinearCostVariable`](@ref) | Lambda weights ``\lambda_i \in [0, 1]`` | +| [`PiecewiseLinearCostConstraint`](@ref) | Links power variable to weighted breakpoints | +| [`PiecewiseLinearCostNormalizationConstraint`](@ref) | Ensures ``\sum \lambda_i = u`` | + +### Implementation + +| Function | Role | +|:------------------------------- |:-------------------------------------------------------------- | +| `_add_pwl_variables!` | Creates ``n + 1`` lambda variables per time step | +| `_add_pwl_constraint_standard!` | Adds linking and normalization constraints | +| `_add_pwl_constraint_compact!` | Compact form with ``P_{\min}`` offset | +| `_add_pwl_term!` | Orchestrates variables, constraints, SOS2, and cost expression | +| `add_pwl_sos2_constraint!` | Adds SOS2 adjacency for non-convex curves | + +## Delta Formulation (Incremental / Block Offers) + +Used by `MarketBidCost` and `ImportExportCost` offer curves. + +The delta formulation assigns a variable ``\delta_k`` to each **segment**, representing +how much of that segment has been used. The operating point is the sum of segment +contributions from the first breakpoint. 
+ +### Formulation + +Given ``n`` segments with breakpoints ``P_0, \ldots, P_n`` and marginal costs (slopes) +``m_k = \frac{C(P_k) - C(P_{k-1})}{P_k - P_{k-1}}``: + +**Variables:** + +```math +\delta_k \geq 0, \quad k = 1, \ldots, n +``` + +**Constraints:** + +```math +\begin{aligned} +p &= \sum_{k=1}^{n} \delta_k + P_{\min,\text{offset}} && \text{(linking)} \\ +\delta_k &\leq P_k - P_{k-1} && \text{(segment capacity)} \\ +C(p) &= \sum_{k=1}^{n} m_k \, \delta_k && \text{(cost)} +\end{aligned} +``` + +### Convexity Advantage + +For convex offer curves (``m_1 \leq m_2 \leq \cdots \leq m_n``), the fill-order +condition is automatically satisfied. A cost-minimizing optimizer fills cheap segments +before expensive ones, so no SOS2 constraints or binary variables are needed. This is +the common case in power systems, where generator marginal costs are typically +increasing. + +### Variables and Constraints + +| Container Type | Description | +|:-------------------------------------------------------- |:-------------------------------------- | +| [`PiecewiseLinearBlockIncrementalOffer`](@ref) | Delta variables for incremental offers | +| [`PiecewiseLinearBlockDecrementalOffer`](@ref) | Delta variables for decremental offers | +| [`PiecewiseLinearBlockIncrementalOfferConstraint`](@ref) | Linking + capacity for incremental | +| [`PiecewiseLinearBlockDecrementalOfferConstraint`](@ref) | Linking + capacity for decremental | + +### Implementation + +| Function | Role | +|:---------------------------------- |:-------------------------------------------------------- | +| `add_pwl_variables!` | Creates ``n`` delta variables per time step | +| `_add_pwl_constraint!` | Adds linking and segment capacity constraints | +| `add_pwl_term!` | Orchestrates variables, constraints, and cost expression | +| `get_pwl_cost_expression` | Builds ``\sum m_k \, \delta_k`` cost expression | +| `add_pwl_block_offer_constraints!` | Low-level block-offer constraint builder | + +## Comparison + +| | 
Lambda (``\lambda``) | Delta (``\delta``) | +|:-------------------- |:---------------------------------- |:----------------------------------- | +| **Thinks about** | Breakpoints (the dots) | Segments (the lines) | +| **Variables** | ``n + 1`` (one per breakpoint) | ``n`` (one per segment) | +| **Output equation** | ``p = \sum \lambda_i \, P_i`` | ``p = \sum \delta_k + P_{\min}`` | +| **Cost equation** | ``C = \sum \lambda_i \, C(P_i)`` | ``C = \sum m_k \, \delta_k`` | +| **Adjacency rule** | Must be enforced explicitly (SOS2) | Often automatic (convex case) | +| **Binary variables** | Usually needed (non-convex) | Often not needed (convex case) | +| **Used by** | `CostCurve`, `FuelCurve` | `MarketBidCost`, `ImportExportCost` | + +## When Does the Choice Matter? + +For **convex** cost functions — where each segment costs more than the last — the delta +formulation is typically simpler and faster. The optimizer fills cheap segments first on +its own, so no extra adjacency constraints are needed. This is the common case in power +systems. + +For **non-convex** cost functions — where a segment might be cheaper than the one before +it — both formulations need additional constraints (SOS2 or binary variables) to force +the correct ordering. In that case the lambda formulation is the traditional choice. + +## Incremental Interpolation (General PWL Approximation) + +In addition to cost functions, the package provides a general-purpose incremental +(delta) method for approximating arbitrary nonlinear functions as piecewise linear. This +is used for PWL approximation of constraints (e.g., loss functions, quadratic terms). 
+ +### Formulation + +Given breakpoints ``(x_1, y_1), \ldots, (x_{n+1}, y_{n+1})`` where ``y_i = f(x_i)``, +and interpolation variables ``\delta_i \in [0, 1]`` with binary ordering variables +``z_i \in \{0, 1\}``: + +```math +\begin{aligned} +x &= x_1 + \sum_{i=1}^{n} \delta_i \, (x_{i+1} - x_i) \\ +y &= y_1 + \sum_{i=1}^{n} \delta_i \, (y_{i+1} - y_i) \\ +z_i &\geq \delta_{i+1}, \quad z_i \leq \delta_i \quad \text{(ordering)} +\end{aligned} +``` + +The ordering constraints ensure segments are filled sequentially: ``\delta_1`` must be +filled before ``\delta_2`` can begin. + +### Implementation + +| Function | Role | +|:---------------------------------------------------- |:------------------------------------------- | +| `_get_breakpoints_for_pwl_function` | Generates equally-spaced breakpoints | +| `add_sparse_pwl_interpolation_variables!` | Creates ``\delta`` and ``z`` variables | +| `_add_generic_incremental_interpolation_constraint!` | Adds interpolation and ordering constraints | diff --git a/docs/src/reference/quadratic_approximations.md b/docs/src/explanation/quadratic_approximations.md similarity index 54% rename from docs/src/reference/quadratic_approximations.md rename to docs/src/explanation/quadratic_approximations.md index 082e1e3..8e75fba 100644 --- a/docs/src/reference/quadratic_approximations.md +++ b/docs/src/explanation/quadratic_approximations.md @@ -99,7 +99,7 @@ linear constraints per component per time step. Same approximation quality as So ## Sawtooth Approximates ``x^2`` using the recursive sawtooth MIP formulation from -Beach, Burlacu, Hager, and Hildebrand (2024). This method requires only ``O(\log(1/\varepsilon))`` +Beach, Burlacu, Hager, and Hildebrand (2023). This method requires only ``O(\log(1/\varepsilon))`` binary variables to achieve error ``\varepsilon``, compared to ``O(1/\sqrt{\varepsilon})`` for the SOS2 methods. @@ -162,3 +162,97 @@ constraints per component per time step. 
The approximation interpolates ``x^2`` To match the number of breakpoints between methods, set ``S = 2^L``. At equal breakpoint count the approximation quality is identical, but the sawtooth uses ``L = \log_2 S`` binary variables instead of ``S``. + +## Error Scaling + +Both SOS2 and sawtooth produce the same PWL interpolation of ``x^2`` when they use the +same number of uniform breakpoints. With ``n`` uniform segments on ``[0, 1]``, the +interpolant ``F_n(x)`` satisfies the classical error bound (Barmann et al., 2023): + +```math +0 \leq F_n(x) - x^2 \leq \frac{1}{4n^2} \quad \text{for all } x \in [0, 1] +``` + +The maximum error is attained at the midpoint of each segment. Since both methods +interpolate at the same breakpoints, the pointwise error is identical — the difference +lies entirely in how efficiently the segments are encoded. + +### Same number of binary variables + +If we budget ``L`` binary/integer variables for each method, SOS2 gets ``L`` segments +while sawtooth gets ``2^L`` segments. The error gap grows exponentially: + +| ``L`` | SOS2 segments | SOS2 error | Sawtooth segments | Sawtooth error | Ratio | +|:----- |:------------- |:--------------------- |:----------------- |:--------------------- |:----- | +| 1 | 1 | ``2.50\times10^{-1}`` | 2 | ``6.25\times10^{-2}`` | 4x | +| 2 | 2 | ``6.25\times10^{-2}`` | 4 | ``1.56\times10^{-2}`` | 4x | +| 3 | 3 | ``2.78\times10^{-2}`` | 8 | ``3.91\times10^{-3}`` | 7x | +| 4 | 4 | ``1.56\times10^{-2}`` | 16 | ``9.77\times10^{-4}`` | 16x | +| 5 | 5 | ``1.00\times10^{-2}`` | 32 | ``2.44\times10^{-4}`` | 41x | +| 6 | 6 | ``6.94\times10^{-3}`` | 64 | ``6.10\times10^{-5}`` | 114x | +| 7 | 7 | ``5.10\times10^{-3}`` | 128 | ``1.53\times10^{-5}`` | 334x | +| 8 | 8 | ``3.91\times10^{-3}`` | 256 | ``3.81\times10^{-6}`` | 1024x | + +SOS2 error decays **polynomially** as ``O(1/L^2)``; sawtooth error decays +**exponentially** as ``O(4^{-L})``. 
+ +### Same number of segments + +When both methods use the same number of uniform segments ``n``, they produce identical +PWL interpolations, so the approximation error is the same. The difference is in +formulation size: SOS2 needs ``O(n)`` binary variables, sawtooth needs only +``\log_2(n)``. + +| Segments ``n`` | Error | SOS2 vars | Sawtooth vars | Var ratio | +|:-------------- |:--------------------- |:--------- |:------------- |:--------- | +| 2 | ``6.25\times10^{-2}`` | 1 | 1 | 1.0x | +| 4 | ``1.56\times10^{-2}`` | 3 | 2 | 1.5x | +| 8 | ``3.91\times10^{-3}`` | 7 | 3 | 2.3x | +| 16 | ``9.77\times10^{-4}`` | 15 | 4 | 3.8x | +| 32 | ``2.44\times10^{-4}`` | 31 | 5 | 6.2x | +| 64 | ``6.10\times10^{-5}`` | 63 | 6 | 10.5x | +| 128 | ``1.53\times10^{-5}`` | 127 | 7 | 18.1x | +| 256 | ``3.81\times10^{-6}`` | 255 | 8 | 31.9x | + +## Extension to Bilinear Terms + +Bilinear terms ``z = xy`` arise throughout optimization (energy systems, pooling problems, +gas networks). The standard univariate reformulation uses the identity: + +```math +xy = \frac{1}{4}\left[(x + y)^2 - (x - y)^2\right] +``` + +Each squared term is approximated independently with one of the methods above. With both +terms at the same refinement level, the bilinear error satisfies: + +```math +\varepsilon_{xy} \leq \frac{1}{2} \, \varepsilon_{\text{quad}} +``` + +All scaling relationships from the univariate case carry over with a constant factor of +``1/2``. The exponential gap between SOS2 and sawtooth persists. + +## Practical Considerations + +**SOS2** has the advantage of being natively supported by commercial solvers (Gurobi, +CPLEX) with specialized branching rules. Both the solver SOS2 and manual SOS2 +formulations produce sharp LP relaxations for convex functions. + +**Sawtooth** introduces auxiliary continuous variables and big-M-type constraints, which +may interact less favorably with presolve and cutting planes. However, Beach et al. 
(2023) +show that the sawtooth relaxation is **sharp** (its LP relaxation equals the convex hull) +and **hereditarily sharp** (sharpness is preserved at every node in the branch-and-bound +tree). This strong theoretical property, combined with exponentially fewer binary +variables, can lead to significant solver performance gains for problems requiring high +approximation accuracy. + +Whether the tighter approximation at a given variable budget outweighs the structural +advantages of SOS2 depends on the specific problem and solver. + +## References + + - Beach, B., Burlacu, R., Barmann, A., Hager, L., Kleinert, T. (2023). *Enhancements of discretization approaches for non-convex MIQCQPs*. Journal of Global Optimization. + - Barmann, A., Burlacu, R., Hager, L., Kleinert, T. (2023). *On piecewise linear approximations of bilinear terms: structural comparison of univariate and bivariate MIP formulations*. Journal of Global Optimization, 85, 789-819. + - Yarotsky, D. (2017). *Error bounds for approximations with deep ReLU networks*. Neural Networks, 94, 103-114. + - Huchette, J.A. (2018). *Advanced mixed-integer programming formulations: methodology, computation, and application*. PhD thesis, MIT. diff --git a/docs/src/explanation/stub.md b/docs/src/explanation/stub.md deleted file mode 100644 index 979e4f0..0000000 --- a/docs/src/explanation/stub.md +++ /dev/null @@ -1 +0,0 @@ -Please refer to the [Explanation](https://diataxis.fr/explanation/) section of the diataxis framework. 
diff --git a/docs/src/reference/developer_guidelines.md b/docs/src/reference/developer_guidelines.md index f8af70d..0452ee7 100644 --- a/docs/src/reference/developer_guidelines.md +++ b/docs/src/reference/developer_guidelines.md @@ -1,6 +1,6 @@ # Developer Guidelines -In order to contribute to `PowerSystems.jl` repository please read the following sections of +In order to contribute to the `InfrastructureOptimizationModels.jl` repository please read the following sections of [`InfrastructureSystems.jl`](https://github.com/NREL-Sienna/InfrastructureSystems.jl) documentation in detail: diff --git a/scripts/bilinear_delta_benchmark_v2.jl b/scripts/bilinear_delta_benchmark_v2.jl new file mode 100644 index 0000000..125516b --- /dev/null +++ b/scripts/bilinear_delta_benchmark_v2.jl @@ -0,0 +1,514 @@ +""" +Benchmark script for the bilinear delta model described in `bilinear_delta_model.tex`. + +Builds a network OPF with delta (incremental) PWL costs and bilinear V·I power +balance constraints. Compares five bilinear approximation methods from +InfrastructureOptimizationModels: + + 1. Solver-native SOS2 (Bin2) + 2. Manual SOS2 (Bin2, binary-variable adjacency) + 3. Sawtooth MIP (Bin2) + 4. HybS (Hybrid Separable, Bin2 lower + Bin3 upper) + 5. 
D-NMDT (Doubly Discretized Normalized Multiparametric Disaggregation) + +Usage: + julia --project=test scripts/bilinear_delta_benchmark_v2.jl [N] [K] [seed] + + N = number of nodes (default 10, must be even) + K = number of PWL cost segments per generator (default 3) + seed = random seed for network generation (default 42) +""" + +using InfrastructureOptimizationModels +using JuMP +using HiGHS +using Dates +using Random +using Printf + +const IOM = InfrastructureOptimizationModels +const IS = IOM.IS + +# Try to load Ipopt for NLP reference; skip if unavailable +const HAS_IPOPT = try + @eval using Ipopt + true +catch + false +end + +# ─── Minimal types for IOM container infrastructure ────────────────────────── + +mutable struct BenchmarkSystem <: IS.InfrastructureSystemsContainer + base_power::Float64 +end + +IOM.get_base_power(sys::BenchmarkSystem) = sys.base_power +IOM.stores_time_series_in_memory(::BenchmarkSystem) = false + +struct NetworkNode <: IS.InfrastructureSystemsComponent end + +struct VoltageVariable <: IOM.VariableType end +struct CurrentVariable <: IOM.VariableType end + +# ─── Network data ──────────────────────────────────────────────────────────── + +struct NetworkData + N::Int + K::Int + gen_nodes::Vector{String} + dem_nodes::Vector{String} + all_nodes::Vector{String} + edges::Vector{Tuple{String, String}} + conductances::Dict{Tuple{String, String}, Float64} + demands::Dict{String, Float64} + marginal_costs::Dict{String, Vector{Float64}} + segment_widths::Dict{String, Vector{Float64}} +end + +""" +Generate a random connected network matching the model in bilinear_delta_model.tex. + +- `N` nodes split evenly into generators and demands. +- `K` PWL cost segments per generator with nondecreasing marginal costs. +- Connected graph via random spanning tree plus extra edges. +- Conductances scaled for feasibility given voltage/current bounds. +- Demands d_i ∈ [0.05, 0.15]. 
+""" +function generate_network(; N::Int = 10, K::Int = 3, seed::Int = 42) + @assert iseven(N) "N must be even" + rng = MersenneTwister(seed) + + all_nodes = ["n$(i)" for i in 1:N] + gen_nodes = all_nodes[1:(N ÷ 2)] + dem_nodes = all_nodes[(N ÷ 2 + 1):N] + + edges = Tuple{String, String}[] + conductances = Dict{Tuple{String, String}, Float64}() + + # Random spanning tree: shuffle nodes, connect consecutive pairs + perm = shuffle(rng, 1:N) + for idx in 1:(N - 1) + a, b = all_nodes[perm[idx]], all_nodes[perm[idx + 1]] + e = a < b ? (a, b) : (b, a) + if e ∉ edges + push!(edges, e) + conductances[e] = 1.0 + 4.0 * rand(rng) + end + end + + # Extra edges for density + for _ in 1:(N ÷ 2) + i, j = rand(rng, 1:N), rand(rng, 1:N) + i == j && continue + a, b = all_nodes[i], all_nodes[j] + e = a < b ? (a, b) : (b, a) + if e ∉ edges + push!(edges, e) + conductances[e] = 1.0 + 4.0 * rand(rng) + end + end + + # Demands at demand nodes (small enough for network to transport) + demands = Dict(d => 0.05 + 0.1 * rand(rng) for d in dem_nodes) + + # PWL cost data: K segments per generator with nondecreasing marginal costs + marginal_costs = Dict{String, Vector{Float64}}() + segment_widths = Dict{String, Vector{Float64}}() + for g in gen_nodes + mc = sort(rand(rng, K) .* 10.0 .+ 1.0) + marginal_costs[g] = mc + widths = rand(rng, K) .+ 0.1 + widths .*= 1.5 / sum(widths) + segment_widths[g] = widths + end + + return NetworkData( + N, K, gen_nodes, dem_nodes, all_nodes, + edges, conductances, demands, + marginal_costs, segment_widths, + ) +end + +"""Build adjacency list from edge set.""" +function adjacency_list(net::NetworkData) + adj = Dict{String, Vector{Tuple{String, Float64}}}() + for n in net.all_nodes + adj[n] = Tuple{String, Float64}[] + end + for (a, b) in net.edges + g = net.conductances[(a, b)] + push!(adj[a], (b, g)) + push!(adj[b], (a, g)) + end + return adj +end + +# ─── Model constants ───────────────────────────────────────────────────────── + +const V_MIN = 0.8 +const 
V_MAX = 1.2 +const I_GEN_MIN = 0.0 +const I_GEN_MAX = 1.0 +const I_DEM_MIN = -1.0 +const I_DEM_MAX = 0.0 +const P_MAX = 1.5 + +# ─── NLP reference model (direct bilinear constraints via Ipopt) ───────────── + +function build_nlp_model(net::NetworkData) + model = JuMP.Model() + adj = adjacency_list(net) + + # Variables + @variable(model, V_MIN <= V[n in net.all_nodes] <= V_MAX) + @variable(model, I_GEN_MIN <= Ig[g in net.gen_nodes] <= I_GEN_MAX) + @variable(model, I_DEM_MIN <= Id[d in net.dem_nodes] <= I_DEM_MAX) + @variable(model, 0.0 <= Pg[g in net.gen_nodes] <= P_MAX) + @variable(model, + 0.0 <= delta[g in net.gen_nodes, k in 1:(net.K)] <= net.segment_widths[g][k]) + + # Objective: min Σ m_{i,k} · δ_{i,k} + @objective(model, Min, sum( + net.marginal_costs[g][k] * delta[g, k] + for g in net.gen_nodes, k in 1:(net.K) + )) + + # Bilinear: Pg = V · I (generators) + @constraint(model, [g in net.gen_nodes], Pg[g] == V[g] * Ig[g]) + + # Bilinear: V · I = -d (demands) + @constraint(model, [d in net.dem_nodes], V[d] * Id[d] == -net.demands[d]) + + # KCL + for g in net.gen_nodes + @constraint(model, Ig[g] == sum(c * (V[g] - V[j]) for (j, c) in adj[g])) + end + for d in net.dem_nodes + @constraint(model, Id[d] == sum(c * (V[d] - V[j]) for (j, c) in adj[d])) + end + + # Delta link: Pg = Σ δ_{g,k} + @constraint(model, [g in net.gen_nodes], + Pg[g] == sum(delta[g, k] for k in 1:(net.K))) + + return model +end + +# ─── IOM container setup ───────────────────────────────────────────────────── + +function make_container() + sys = BenchmarkSystem(100.0) + settings = IOM.Settings(sys; horizon = Dates.Hour(1), resolution = Dates.Hour(1)) + container = IOM.OptimizationContainer(sys, settings, JuMP.Model(), IS.Deterministic) + IOM.set_time_steps!(container, 1:1) + return container +end + +# ─── Expression type for retrieving bilinear results ───────────────────────── + +"""Map method symbol to the expression type used to store results.""" +function _get_expression_type(method::Symbol) 
+ if method in (:sos2, :manual_sos2, :sawtooth) + return IOM.BilinearProductExpression() + elseif method === :hybs + return IOM.HybSProductExpression() + elseif method === :dnmdt + return IOM.DNMDTBilinearExpression() + else + error("Unknown method: $method") + end +end + +# ─── MIP model using IOM bilinear approximations ───────────────────────────── + +""" + build_mip_model(net, method, refinement) -> NamedTuple + +Build the MIP approximation of the bilinear delta model. + +# Arguments +- `net::NetworkData`: network data +- `method::Symbol`: `:sos2`, `:manual_sos2`, `:sawtooth`, `:hybs`, or `:dnmdt` +- `refinement::Int`: number of PWL segments (SOS2) or depth (sawtooth/hybs/dnmdt) +""" +function build_mip_model(net::NetworkData, method::Symbol, refinement::Int) + container = make_container() + jump_model = IOM.get_jump_model(container) + time_steps = 1:1 + adj = adjacency_list(net) + + # --- V and I variable containers via IOM --- + V_container = IOM.add_variable_container!( + container, VoltageVariable(), NetworkNode, + net.all_nodes, time_steps, + ) + I_container = IOM.add_variable_container!( + container, CurrentVariable(), NetworkNode, + net.all_nodes, time_steps, + ) + + for n in net.all_nodes + V_container[n, 1] = JuMP.@variable( + jump_model, base_name = "V_$(n)", + lower_bound = V_MIN, upper_bound = V_MAX, + ) + end + for g in net.gen_nodes + I_container[g, 1] = JuMP.@variable( + jump_model, base_name = "I_$(g)", + lower_bound = I_GEN_MIN, upper_bound = I_GEN_MAX, + ) + end + for d in net.dem_nodes + I_container[d, 1] = JuMP.@variable( + jump_model, base_name = "I_$(d)", + lower_bound = I_DEM_MIN, upper_bound = I_DEM_MAX, + ) + end + + # --- Dispatch to the chosen bilinear approximation method --- + bilinear_fn! 
= if method === :sos2 + (cont, names, xc, yc, ylo, yhi, meta) -> + IOM._add_sos2_bilinear_approx!( + cont, NetworkNode, names, time_steps, xc, yc, + V_MIN, V_MAX, ylo, yhi, refinement, meta, + ) + elseif method === :manual_sos2 + (cont, names, xc, yc, ylo, yhi, meta) -> + IOM._add_manual_sos2_bilinear_approx!( + cont, NetworkNode, names, time_steps, xc, yc, + V_MIN, V_MAX, ylo, yhi, refinement, meta, + ) + elseif method === :sawtooth + (cont, names, xc, yc, ylo, yhi, meta) -> + IOM._add_sawtooth_bilinear_approx!( + cont, NetworkNode, names, time_steps, xc, yc, + V_MIN, V_MAX, ylo, yhi, refinement, meta, + ) + elseif method === :hybs + (cont, names, xc, yc, ylo, yhi, meta) -> + IOM._add_hybs_bilinear_approx!( + cont, NetworkNode, names, time_steps, xc, yc, + V_MIN, V_MAX, ylo, yhi, refinement, meta, + ) + elseif method === :dnmdt + (cont, names, xc, yc, ylo, yhi, meta) -> + IOM._add_dnmdt_bilinear_approx!( + cont, NetworkNode, names, time_steps, xc, yc, + V_MIN, V_MAX, ylo, yhi, refinement, meta, + ) + else + error("Unknown method: $method. 
Use :sos2, :manual_sos2, :sawtooth, :hybs, or :dnmdt.") + end + + # Generator bilinear: z_gen ≈ V · I_gen + bilinear_fn!( + container, net.gen_nodes, V_container, I_container, + I_GEN_MIN, I_GEN_MAX, "gen", + ) + + # Demand bilinear: z_dem ≈ V · I_dem + bilinear_fn!( + container, net.dem_nodes, V_container, I_container, + I_DEM_MIN, I_DEM_MAX, "dem", + ) + + # Retrieve expression containers from the optimization container + expr_type = _get_expression_type(method) + z_gen = IOM.get_expression(container, expr_type, NetworkNode, "gen") + z_dem = IOM.get_expression(container, expr_type, NetworkNode, "dem") + + # --- Remaining linear model components (directly in JuMP) --- + Pg = Dict{String, JuMP.VariableRef}() + delta = Dict{Tuple{String, Int}, JuMP.VariableRef}() + + for g in net.gen_nodes + Pg[g] = JuMP.@variable(jump_model, base_name = "Pg_$(g)", + lower_bound = 0.0, upper_bound = P_MAX) + for k in 1:(net.K) + delta[(g, k)] = JuMP.@variable(jump_model, base_name = "delta_$(g)_$(k)", + lower_bound = 0.0, upper_bound = net.segment_widths[g][k]) + end + end + + # Objective: min Σ m_{i,k} · δ_{i,k} + JuMP.@objective(jump_model, Min, sum( + net.marginal_costs[g][k] * delta[(g, k)] + for g in net.gen_nodes, k in 1:(net.K) + )) + + # Pg == bilinear approx of V·I (generators) + for g in net.gen_nodes + JuMP.@constraint(jump_model, Pg[g] == z_gen[(g, 1)]) + end + + # V·I == -d (demands) + for d in net.dem_nodes + JuMP.@constraint(jump_model, z_dem[(d, 1)] == -net.demands[d]) + end + + # KCL: I_i = Σ g_{ij}(V_i - V_j) + for n in net.all_nodes + JuMP.@constraint(jump_model, + I_container[n, 1] == sum( + c * (V_container[n, 1] - V_container[j, 1]) + for (j, c) in adj[n] + )) + end + + # Delta link: Pg = Σ δ_{g,k} + for g in net.gen_nodes + JuMP.@constraint(jump_model, + Pg[g] == sum(delta[(g, k)] for k in 1:(net.K))) + end + + return (; container, jump_model, V_container, I_container, z_gen, z_dem) +end + +# ─── Metrics 
────────────────────────────────────────────────────────────────── + +function compute_bilinear_residuals(result, net) + V = result.V_container + I = result.I_container + residuals = Float64[] + for g in net.gen_nodes + v_val = JuMP.value(V[g, 1]) + i_val = JuMP.value(I[g, 1]) + z_val = JuMP.value(result.z_gen[(g, 1)]) + push!(residuals, abs(v_val * i_val - z_val)) + end + for d in net.dem_nodes + v_val = JuMP.value(V[d, 1]) + i_val = JuMP.value(I[d, 1]) + z_val = JuMP.value(result.z_dem[(d, 1)]) + push!(residuals, abs(v_val * i_val - z_val)) + end + return residuals +end + +function model_size(jump_model) + nv = JuMP.num_variables(jump_model) + nc = sum( + JuMP.num_constraints(jump_model, f, s) + for (f, s) in JuMP.list_of_constraint_types(jump_model) + ) + nb = JuMP.num_constraints(jump_model, JuMP.VariableRef, JuMP.MOI.ZeroOne) + return (; variables = nv, constraints = nc, binaries = nb) +end + +# ─── Main benchmark ────────────────────────────────────────────────────────── + +""" + run_benchmark(; N, K, seed, segment_counts, depths) + +Run the full benchmark comparing all five bilinear approximation methods. 
+ +# Keyword arguments +- `N::Int = 10`: number of nodes (must be even) +- `K::Int = 3`: number of PWL cost segments per generator +- `seed::Int = 42`: random seed for network generation +- `segment_counts::Vector{Int} = [2, 4, 8, 16]`: segment counts for SOS2 methods +- `depths::Vector{Int} = [2, 4, 8]`: depths for sawtooth, HybS, and D-NMDT methods +""" +function run_benchmark(; + N::Int = 10, + K::Int = 3, + seed::Int = 42, + segment_counts::Vector{Int} = [2, 4, 8, 16], + depths::Vector{Int} = [2, 4, 8], +) + net = generate_network(; N, K, seed) + println("Network: $(net.N) nodes, $(length(net.edges)) edges, $(net.K) cost segments") + println("Generators: $(length(net.gen_nodes)), Demands: $(length(net.dem_nodes))") + println() + + # --- NLP reference --- + nlp_obj = NaN + if HAS_IPOPT + println("=" ^ 100) + println("NLP Reference (Ipopt, bilinear V·I constraints)") + println("=" ^ 100) + nlp_model = build_nlp_model(net) + JuMP.set_optimizer(nlp_model, Ipopt.Optimizer) + JuMP.set_optimizer_attribute(nlp_model, "print_level", 0) + nlp_t = @elapsed JuMP.optimize!(nlp_model) + nlp_status = JuMP.termination_status(nlp_model) + sz = model_size(nlp_model) + if nlp_status == JuMP.LOCALLY_SOLVED + nlp_obj = JuMP.objective_value(nlp_model) + end + @printf(" Status: %s\n", nlp_status) + @printf(" Objective: %.6f\n", nlp_obj) + @printf(" Variables: %d\n", sz.variables) + @printf(" Constraints: %d\n", sz.constraints) + @printf(" Solve time: %.4f s\n", nlp_t) + else + println("Ipopt not available — skipping NLP reference.") + println("Install Ipopt.jl in the test environment for NLP comparison.") + end + println() + + # --- MIP benchmarks --- + method_configs = [ + (:sos2, "Solver SOS2", segment_counts), + (:manual_sos2, "Manual SOS2", segment_counts), + (:sawtooth, "Sawtooth", depths), + (:hybs, "HybS", depths), + (:dnmdt, "D-NMDT", depths), + ] + + println("=" ^ 100) + println("MIP Bilinear Approximations (HiGHS)") + println(" Refinement = num_segments for SOS2 methods, 
depth for Sawtooth/HybS/D-NMDT") + println("=" ^ 100) + @printf("%-14s %5s %7s %8s %7s %12s %9s %10s %10s %10s\n", + "Method", "Ref", "Vars", "Constrs", "Bins", "Objective", + "Gap(%)", "Max Resid", "Build(s)", "Solve(s)") + println("-" ^ 100) + + for (method, label, refs) in method_configs + for ref in refs + build_t = @elapsed begin + result = build_mip_model(net, method, ref) + end + + JuMP.set_optimizer(result.jump_model, HiGHS.Optimizer) + JuMP.set_optimizer_attribute(result.jump_model, "log_to_console", false) + JuMP.set_optimizer_attribute(result.jump_model, "time_limit", 300.0) + + solve_t = @elapsed JuMP.optimize!(result.jump_model) + status = JuMP.termination_status(result.jump_model) + sz = model_size(result.jump_model) + + if status in (JuMP.OPTIMAL, JuMP.TIME_LIMIT) && + JuMP.has_values(result.jump_model) + obj = JuMP.objective_value(result.jump_model) + gap = isnan(nlp_obj) ? NaN : + abs(nlp_obj - obj) / max(abs(nlp_obj), 1e-10) * 100.0 + resids = compute_bilinear_residuals(result, net) + gap_str = isnan(gap) ? 
" -" : @sprintf("%8.4f", gap) + @printf("%-14s %5d %7d %8d %7d %12.6f %s %10.2e %10.4f %10.4f\n", + label, ref, + sz.variables, sz.constraints, sz.binaries, + obj, gap_str, maximum(resids), build_t, solve_t) + else + @printf("%-14s %5d %7d %8d %7d %12s %9s %10s %10.4f %10.4f\n", + label, ref, + sz.variables, sz.constraints, sz.binaries, + string(status), "-", "-", build_t, solve_t) + end + end + println() + end + println("=" ^ 100) +end + +# ─── Entry point ────────────────────────────────────────────────────────────── + +if abspath(PROGRAM_FILE) == @__FILE__ + N = get(ARGS, 1, "10") |> x -> parse(Int, x) + K = get(ARGS, 2, "3") |> x -> parse(Int, x) + seed = get(ARGS, 3, "42") |> x -> parse(Int, x) + run_benchmark(; N, K, seed) +end diff --git a/src/InfrastructureOptimizationModels.jl b/src/InfrastructureOptimizationModels.jl index 9325f8f..1ce147f 100644 --- a/src/InfrastructureOptimizationModels.jl +++ b/src/InfrastructureOptimizationModels.jl @@ -562,14 +562,14 @@ include("common_models/add_constraint_dual.jl") include("common_models/add_jump_expressions.jl") # helpers only used in POM. include("common_models/set_expression.jl") # helpers only used in POM. include("common_models/get_time_series.jl") -include("common_models/add_pwl_methods.jl") +# PWL interpolation methods moved to quadratic_approximations/ include("common_models/constraint_helpers.jl") include("common_models/range_constraint.jl") include("common_models/duration_constraints.jl") include("common_models/rateofchange_constraints.jl") # Objective function implementations -include("objective_function/cost_term_helpers.jl") # generic helpers: add_cost_term_{invariant,variant}!, PWL helpers +include("objective_function/cost_term_helpers.jl") # generic helpers: add_cost_term_{invariant,variant}! include("objective_function/common.jl") include("objective_function/proportional.jl") # add_proportional_cost! and add_proportional_cost_maybe_time_variant! 
include("objective_function/start_up_shut_down.jl") # add_{start_up, shut_down}_cost! @@ -579,20 +579,33 @@ include("objective_function/linear_curve.jl") include("objective_function/quadratic_curve.jl") include("objective_function/import_export.jl") -# add_variable_cost! implementations, but "it's complicated." Other stuff exported too -include("objective_function/piecewise_linear.jl") -# Offer curve types must come before market_bid.jl +# Offer curve types (pure type definitions, no dependencies) include("objective_function/offer_curve_types.jl") -include("objective_function/market_bid.jl") + +# Pure PWL formulation math (must come before cost-data-specific files) +include("objective_function/objective_function_pwl_lambda.jl") # lambda/convex combination PWL +include("objective_function/objective_function_pwl_delta.jl") # delta/incremental block PWL + +# Cost-data-specific mapping to PWL formulations +include("objective_function/piecewise_linear.jl") # CostCurve/FuelCurve → lambda PWL +include("objective_function/market_bid.jl") # OfferCurveCost → delta PWL # Quadratic approximations (PWL via SOS2) +include("quadratic_approximations/common.jl") +include("quadratic_approximations/pwl_utils.jl") +include("quadratic_approximations/incremental.jl") include("quadratic_approximations/solver_sos2.jl") include("quadratic_approximations/manual_sos2.jl") include("quadratic_approximations/sawtooth.jl") -include("quadratic_approximations/mccormick.jl") include("quadratic_approximations/epigraph.jl") -include("quadratic_approximations/bilinear.jl") -include("quadratic_approximations/hybs.jl") + +# Bilinear approximations (x·y via Bin2/HybS decomposition) +include("bilinear_approximations/mccormick.jl") +include("bilinear_approximations/bilinear.jl") +include("bilinear_approximations/hybs.jl") + +# DNMDT uses BilinearProductVariable from bilinear.jl — must come after bilinear_approximations +include("quadratic_approximations/dnmdt.jl") # add_param_container! 
wrappers — must come after piecewise_linear.jl # (which defines VariableValueParameter and FixValueParameter) diff --git a/src/quadratic_approximations/bilinear.jl b/src/bilinear_approximations/bilinear.jl similarity index 75% rename from src/quadratic_approximations/bilinear.jl rename to src/bilinear_approximations/bilinear.jl index b465c26..b840046 100644 --- a/src/quadratic_approximations/bilinear.jl +++ b/src/bilinear_approximations/bilinear.jl @@ -42,8 +42,8 @@ function _add_bilinear_approx_impl!( ::Type{C}, names::Vector{String}, time_steps::UnitRange{Int}, - x_var_container, - y_var_container, + x_var, + y_var, x_min::Float64, x_max::Float64, y_min::Float64, @@ -62,8 +62,7 @@ function _add_bilinear_approx_impl!( meta_x = meta * "_x" meta_y = meta * "_y" - # Create p expression container - p_container = add_expression_container!( + p_expr = add_expression_container!( container, VariableSumExpression(), C, @@ -71,91 +70,63 @@ function _add_bilinear_approx_impl!( time_steps; meta = meta_plus, ) - for name in names, t in time_steps - p_expr = JuMP.AffExpr(0.0) - JuMP.add_to_expression!(p_expr, x_var_container[name, t]) - JuMP.add_to_expression!(p_expr, y_var_container[name, t]) - p_container[name, t] = p_expr + p = JuMP.AffExpr(0.0) + JuMP.add_to_expression!(p, x_var[name, t]) + JuMP.add_to_expression!(p, y_var[name, t]) + p_expr[name, t] = p end # Approximate p², x², y² using the provided quadratic approximation function - quad_approx_fn( - container, C, names, time_steps, p_container, p_min, p_max, meta_plus, - ) - quad_approx_fn( - container, C, names, time_steps, x_var_container, x_min, x_max, meta_x, - ) - quad_approx_fn( - container, C, names, time_steps, y_var_container, y_min, y_max, meta_y, - ) - - # Retrieve quadratic approximation expression containers - zp_container = get_expression( - container, QuadraticApproximationExpression(), C, meta_plus, + zp_expr = quad_approx_fn( + container, C, names, time_steps, p_expr, p_min, p_max, meta_plus, ) - 
zx_container = get_expression( - container, QuadraticApproximationExpression(), C, meta_x, + zx_expr = quad_approx_fn( + container, C, names, time_steps, x_var, x_min, x_max, meta_x, ) - zy_container = get_expression( - container, QuadraticApproximationExpression(), C, meta_y, + zy_expr = quad_approx_fn( + container, C, names, time_steps, y_var, y_min, y_max, meta_y, ) - z_container = add_variable_container!( - container, - BilinearProductVariable(), - C, - names, - time_steps; - meta, - ) + z_var = @_add_container(variable, BilinearProductVariable) + link_cons = @_add_container(constraints, BilinearProductLinkingConstraint) + result_expr = @_add_container(expression, BilinearProductExpression) - link_container = add_constraints_container!( - container, - BilinearProductLinkingConstraint(), - C, - names, - time_steps; - meta, - ) - expr_container = add_expression_container!( - container, - BilinearProductExpression(), - C, - names, - time_steps; - meta, - ) + # Compute valid bounds for z = x·y from variable bounds + z_lo = min(x_min * y_min, x_min * y_max, x_max * y_min, x_max * y_max) + z_hi = max(x_min * y_min, x_min * y_max, x_max * y_min, x_max * y_max) for name in names, t in time_steps # It's not necessary to create a variable container here, but it is # necessary in HybS, so this is here for symmetry. 
- z_var = JuMP.@variable( - jump_model, - base_name = "BilinearProduct_$(C)_{$(name), $(t)}", - ) - z_container[name, t] = z_var + z = + z_var[name, t] = JuMP.@variable( + jump_model, + base_name = "BilinearProduct_$(C)_{$(name), $(t)}", + lower_bound = z_lo, + upper_bound = z_hi, + ) # z = (1/2) * (p² − x² - y²) z_expr = JuMP.AffExpr(0.0) - JuMP.add_to_expression!(z_expr, 0.5, zp_container[name, t]) - JuMP.add_to_expression!(z_expr, -0.5, zx_container[name, t]) - JuMP.add_to_expression!(z_expr, -0.5, zy_container[name, t]) - link_container[name, t] = JuMP.@constraint(jump_model, z_var == z_expr) + JuMP.add_to_expression!(z_expr, 0.5, zp_expr[name, t]) + JuMP.add_to_expression!(z_expr, -0.5, zx_expr[name, t]) + JuMP.add_to_expression!(z_expr, -0.5, zy_expr[name, t]) + link_cons[name, t] = JuMP.@constraint(jump_model, z == z_expr) - expr_container[name, t] = JuMP.AffExpr(0.0, z_var => 1.0) + result_expr[name, t] = JuMP.AffExpr(0.0, z => 1.0) end # Optional McCormick envelope if add_mccormick _add_mccormick_envelope!( container, C, names, time_steps, - x_var_container, y_var_container, z_container, + x_var, y_var, z_var, x_min, x_max, y_min, y_max, meta, ) end - return + return result_expr end """ @@ -185,13 +156,12 @@ function _add_sos2_bilinear_approx!( quad_fn = (cont, CT, nms, ts, vc, lo, hi, m) -> _add_sos2_quadratic_approx!(cont, CT, nms, ts, vc, lo, hi, num_segments, m) - _add_bilinear_approx_impl!( + return _add_bilinear_approx_impl!( container, C, names, time_steps, x_var_container, y_var_container, x_min, x_max, y_min, y_max, quad_fn, meta; add_mccormick, ) - return end """ @@ -231,13 +201,12 @@ function _add_manual_sos2_bilinear_approx!( num_segments, m, ) - _add_bilinear_approx_impl!( + return _add_bilinear_approx_impl!( container, C, names, time_steps, x_var_container, y_var_container, x_min, x_max, y_min, y_max, quad_fn, meta; add_mccormick, ) - return end """ @@ -267,11 +236,10 @@ function _add_sawtooth_bilinear_approx!( quad_fn = (cont, CT, nms, ts, 
vc, lo, hi, m) -> _add_sawtooth_quadratic_approx!(cont, CT, nms, ts, vc, lo, hi, depth, m) - _add_bilinear_approx_impl!( + return _add_bilinear_approx_impl!( container, C, names, time_steps, x_var_container, y_var_container, x_min, x_max, y_min, y_max, quad_fn, meta; add_mccormick, ) - return end diff --git a/src/quadratic_approximations/hybs.jl b/src/bilinear_approximations/hybs.jl similarity index 53% rename from src/quadratic_approximations/hybs.jl rename to src/bilinear_approximations/hybs.jl index 847acd7..13e088c 100644 --- a/src/quadratic_approximations/hybs.jl +++ b/src/bilinear_approximations/hybs.jl @@ -7,7 +7,7 @@ struct HybSBoundConstraint <: ConstraintType end """ - _add_hybs_bilinear_approx!(container, C, names, time_steps, x_var_container, y_var_container, x_min, x_max, y_min, y_max, depth, meta; add_mccormick) + _add_hybs_bilinear_approx!(container, C, names, time_steps, x_var, y_var, x_min, x_max, y_min, y_max, depth, meta; add_mccormick) Approximate x·y using the HybS (Hybrid Separable) relaxation from Beach et al. (2024). 
@@ -25,8 +25,8 @@ Stores affine expressions approximating x·y in a `HybSProductExpression` expres - `::Type{C}`: component type - `names::Vector{String}`: component names - `time_steps::UnitRange{Int}`: time periods -- `x_var_container`: container of x variables indexed by (name, t) -- `y_var_container`: container of y variables indexed by (name, t) +- `x_var`: container of x variables indexed by (name, t) +- `y_var`: container of y variables indexed by (name, t) - `x_min::Float64`: lower bound of x - `x_max::Float64`: upper bound of x - `y_min::Float64`: lower bound of y @@ -40,8 +40,8 @@ function _add_hybs_bilinear_approx!( ::Type{C}, names::Vector{String}, time_steps::UnitRange{Int}, - x_var_container, - y_var_container, + x_var, + y_var, x_min::Float64, x_max::Float64, y_min::Float64, @@ -66,138 +66,89 @@ function _add_hybs_bilinear_approx!( meta_p1 = meta * "_plus" meta_p2 = meta * "_diff" - p1_container = add_expression_container!( - container, - VariableSumExpression(), - C, - names, - time_steps; - meta = meta_p1, - ) - - p2_container = add_expression_container!( - container, - VariableDifferenceExpression(), - C, - names, - time_steps; - meta = meta_p2, - ) + p1_expr = @_add_container(expression, VariableSumExpression, meta_p1) + p2_expr = @_add_container(expression, VariableDifferenceExpression, meta_p2) for name in names, t in time_steps - x = x_var_container[name, t] - y = y_var_container[name, t] + x = x_var[name, t] + y = y_var[name, t] # p1 = x + y - p1_expr = JuMP.AffExpr(0.0) - JuMP.add_to_expression!(p1_expr, x) - JuMP.add_to_expression!(p1_expr, y) - p1_container[name, t] = p1_expr + p1 = p1_expr[name, t] = JuMP.AffExpr(0.0) + JuMP.add_to_expression!(p1, x) + JuMP.add_to_expression!(p1, y) # p2 = x − y - p2_expr = JuMP.AffExpr(0.0) - JuMP.add_to_expression!(p2_expr, x) - JuMP.add_to_expression!(p2_expr, -1.0, y) - p2_container[name, t] = p2_expr + p2 = p2_expr[name, t] = JuMP.AffExpr(0.0) + JuMP.add_to_expression!(p2, x) + 
JuMP.add_to_expression!(p2, -1.0, y) end # --- Sawtooth S^L for x² and y² (binary variables here) --- - _add_sawtooth_quadratic_approx!( + zx_expr = _add_sawtooth_quadratic_approx!( container, C, names, time_steps, - x_var_container, x_min, x_max, depth, meta_x, + x_var, x_min, x_max, depth, meta_x, ) - _add_sawtooth_quadratic_approx!( + zy_expr = _add_sawtooth_quadratic_approx!( container, C, names, time_steps, - y_var_container, y_min, y_max, depth, meta_y, + y_var, y_min, y_max, depth, meta_y, ) # --- Epigraph Q^{L1} lower bound for (x+y)² and (x−y)² (no binaries) --- - _add_epigraph_quadratic_approx!( + zp1_expr = _add_epigraph_quadratic_approx!( container, C, names, time_steps, - p1_container, p1_min, p1_max, depth, meta_p1, + p1_expr, p1_min, p1_max, depth, meta_p1, ) - _add_epigraph_quadratic_approx!( + zp2_expr = _add_epigraph_quadratic_approx!( container, C, names, time_steps, - p2_container, p2_min, p2_max, depth, meta_p2, - ) - - # Retrieve expression containers - zx_container = get_expression( - container, QuadraticApproximationExpression(), C, meta_x, - ) - zy_container = get_expression( - container, QuadraticApproximationExpression(), C, meta_y, - ) - zp1_container = get_expression( - container, EpigraphExpression(), C, meta_p1, - ) - zp2_container = get_expression( - container, EpigraphExpression(), C, meta_p2, + p2_expr, p2_min, p2_max, depth, meta_p2, ) # --- Create z variable and two-sided HybS bounds --- - z_container = add_variable_container!( - container, - BilinearProductVariable(), - C, - names, - time_steps; - meta, - ) - hybs_bound_container = add_constraints_container!( - container, - HybSBoundConstraint(), - C, - names, - 1:2, - time_steps; - meta, - sparse = true, - ) + z_var = @_add_container(variable, BilinearProductVariable) + hybrid_cons = @_add_container(constraints, HybSBoundConstraint, 1:2, sparse) + result_expr = @_add_container(expression, BilinearProductExpression) - expr_container = add_expression_container!( - container, - 
BilinearProductExpression(), - C, - names, - time_steps; - meta, - ) + # Compute valid bounds for z ≈ x·y from variable bounds + z_lo = min(x_min * y_min, x_min * y_max, x_max * y_min, x_max * y_max) + z_hi = max(x_min * y_min, x_min * y_max, x_max * y_min, x_max * y_max) for name in names, t in time_steps - z_var = JuMP.@variable( - jump_model, - base_name = "HybSProduct_$(C)_{$(name), $(t)}", - ) - z_container[name, t] = z_var - - zx = zx_container[name, t] - zy = zy_container[name, t] - zp1 = zp1_container[name, t] - zp2 = zp2_container[name, t] + z = + z_var[name, t] = JuMP.@variable( + jump_model, + base_name = "HybSProduct_$(C)_{$(name), $(t)}", + lower_bound = z_lo, + upper_bound = z_hi, + ) + + zx = zx_expr[name, t] + zy = zy_expr[name, t] + zp1 = zp1_expr[name, t] + zp2 = zp2_expr[name, t] # Bin2 lower bound: z ≥ ½(z_p1 − z_x − z_y) - hybs_bound_container[(name, 1, t)] = JuMP.@constraint( + hybrid_cons[(name, 1, t)] = JuMP.@constraint( jump_model, - z_var >= 0.5 * (zp1 - zx - zy), + z >= 0.5 * (zp1 - zx - zy), ) # Bin3 upper bound: z ≤ ½(z_x + z_y − z_p2) - hybs_bound_container[(name, 2, t)] = JuMP.@constraint( + hybrid_cons[(name, 2, t)] = JuMP.@constraint( jump_model, - z_var <= 0.5 * (zx + zy - zp2), + z <= 0.5 * (zx + zy - zp2), ) - expr_container[name, t] = JuMP.AffExpr(0.0, z_var => 1.0) + result_expr[name, t] = JuMP.AffExpr(0.0, z => 1.0) end # --- Step 6: McCormick envelope for additional tightening --- if add_mccormick _add_mccormick_envelope!( container, C, names, time_steps, - x_var_container, y_var_container, z_container, + x_var, y_var, z_var, x_min, x_max, y_min, y_max, meta, ) end - return + return result_expr end diff --git a/src/quadratic_approximations/mccormick.jl b/src/bilinear_approximations/mccormick.jl similarity index 69% rename from src/quadratic_approximations/mccormick.jl rename to src/bilinear_approximations/mccormick.jl index 6e4f977..53629c2 100644 --- a/src/quadratic_approximations/mccormick.jl +++ 
b/src/bilinear_approximations/mccormick.jl @@ -4,7 +4,7 @@ struct McCormickConstraint <: ConstraintType end """ - _add_mccormick_envelope!(container, C, names, time_steps, x_var_container, y_var_container, z_var_container, x_min, x_max, y_min, y_max, meta) + _add_mccormick_envelope!(container, C, names, time_steps, x_var, y_var, z_var, x_min, x_max, y_min, y_max, meta) Add McCormick envelope constraints for the bilinear product z ≈ x·y. @@ -21,9 +21,9 @@ z ≤ x_min·y + x·y_max − x_min·y_max - `::Type{C}`: component type - `names::Vector{String}`: component names - `time_steps::UnitRange{Int}`: time periods -- `x_var_container`: container of x variables indexed by (name, t) -- `y_var_container`: container of y variables indexed by (name, t) -- `z_var_container`: container of z variables indexed by (name, t) +- `x_var`: container of x variables indexed by (name, t) +- `y_var`: container of y variables indexed by (name, t) +- `z_var`: container of z variables indexed by (name, t) - `x_min::Float64`: lower bound of x - `x_max::Float64`: upper bound of x - `y_min::Float64`: lower bound of y @@ -38,9 +38,9 @@ function _add_mccormick_envelope!( ::Type{C}, names::Vector{String}, time_steps::UnitRange{Int}, - x_var_container, - y_var_container, - z_var_container, + x_var, + y_var, + z_var, x_min::Float64, x_max::Float64, y_min::Float64, @@ -51,39 +51,30 @@ function _add_mccormick_envelope!( IS.@assert_op y_max > y_min jump_model = get_jump_model(container) - mc_container = add_constraints_container!( - container, - McCormickConstraint(), - C, - names, - 1:4, - time_steps; - meta, - sparse = true, - ) + mc_cons = @_add_container(constraints, McCormickConstraint, 1:4, sparse) for name in names, t in time_steps - x = x_var_container[name, t] - y = y_var_container[name, t] - z = z_var_container[name, t] + x = x_var[name, t] + y = y_var[name, t] + z = z_var[name, t] # z ≥ x_min·y + x·y_min − x_min·y_min - mc_container[(name, 1, t)] = JuMP.@constraint( + mc_cons[(name, 1, t)] = 
JuMP.@constraint( jump_model, z >= x_min * y + x * y_min - x_min * y_min, ) # z ≥ x_max·y + x·y_max − x_max·y_max - mc_container[(name, 2, t)] = JuMP.@constraint( + mc_cons[(name, 2, t)] = JuMP.@constraint( jump_model, z >= x_max * y + x * y_max - x_max * y_max, ) # z ≤ x_max·y + x·y_min − x_max·y_min - mc_container[(name, 3, t)] = JuMP.@constraint( + mc_cons[(name, 3, t)] = JuMP.@constraint( jump_model, z <= x_max * y + x * y_min - x_max * y_min, ) # z ≤ x_min·y + x·y_max − x_min·y_max - mc_container[(name, 4, t)] = JuMP.@constraint( + mc_cons[(name, 4, t)] = JuMP.@constraint( jump_model, z <= x_min * y + x * y_max - x_min * y_max, ) diff --git a/src/objective_function/cost_term_helpers.jl b/src/objective_function/cost_term_helpers.jl index 827b834..19cd3be 100644 --- a/src/objective_function/cost_term_helpers.jl +++ b/src/objective_function/cost_term_helpers.jl @@ -156,234 +156,3 @@ function add_proportional_cost_invariant!( end return end - -####################################### -######## PWL Helper Functions ######### -####################################### - -""" -Create PWL delta variables for a component at a given time period. - -Creates `n_points` variables with specified bounds. - -# Arguments -- `container`: the optimization container -- `V`: variable type for the delta variables (caller provides) -- `C`: component type -- `name`: component name -- `t`: time period -- `n_points`: number of PWL points (= number of delta variables) -- `upper_bound`: upper bound for variables (default 1.0 for convex combination formulation; - use `Inf` for block offer formulation) - -# Returns -Vector of the created JuMP variables. 
-""" -function add_pwl_variables!( - container::OptimizationContainer, - ::Type{V}, - ::Type{C}, - name::String, - t::Int, - n_points::Int; - upper_bound::Float64 = 1.0, -) where {V <: SparseVariableType, C <: IS.InfrastructureSystemsComponent} - # SparseVariableType dispatch automatically creates container with (String, Int, Int) keys - # axes are (name, pwl_index, time_step). - var_container = lazy_container_addition!(container, V(), C) - pwl_vars = Vector{JuMP.VariableRef}(undef, n_points) - jump_model = get_jump_model(container) - for i in 1:n_points - if isfinite(upper_bound) - pwl_vars[i] = - var_container[(name, i, t)] = JuMP.@variable( - jump_model, - base_name = "$(V)_$(C)_{$(name), pwl_$(i), $(t)}", - lower_bound = 0.0, - upper_bound = upper_bound, - ) - else - pwl_vars[i] = - var_container[(name, i, t)] = JuMP.@variable( - jump_model, - base_name = "$(V)_$(C)_{$(name), pwl_$(i), $(t)}", - lower_bound = 0.0, - ) - end - end - return pwl_vars -end - -""" -Add PWL linking constraint: power variable equals weighted sum of breakpoints. - - P[name, t] == Σ δ[i] * breakpoint[i] - -# Arguments -- `container`: the optimization container -- `K`: constraint type (caller provides) -- `C`: component type -- `name`: component name -- `t`: time period -- `power_var`: the power variable to link (JuMP variable reference) -- `pwl_vars`: vector of PWL delta variables -- `breakpoints`: vector of breakpoint values (in p.u.) 
-""" -function add_pwl_linking_constraint!( - container::OptimizationContainer, - ::Type{K}, - ::Type{C}, - name::String, - t::Int, - power_var::JuMP.VariableRef, - pwl_vars::Vector{JuMP.VariableRef}, - breakpoints::Vector{Float64}, -) where {K <: ConstraintType, C <: IS.InfrastructureSystemsComponent} - @assert length(pwl_vars) == length(breakpoints) - # Create sparse container with (name, time) indexing if it doesn't exist - if !has_container_key(container, K, C) - con_key = ConstraintKey(K, C) - contents = Dict{Tuple{String, Int}, Union{Nothing, JuMP.ConstraintRef}}() - _assign_container!( - container.constraints, - con_key, - JuMP.Containers.SparseAxisArray(contents), - ) - end - con_container = get_constraint(container, K(), C) - jump_model = get_jump_model(container) - con_container[name, t] = JuMP.@constraint( - jump_model, - power_var == sum(pwl_vars[i] * breakpoints[i] for i in eachindex(breakpoints)) - ) - return -end - -""" -Add PWL normalization constraint: delta variables sum to on_status. 
- - Σ δ[i] == on_status - -# Arguments -- `container`: the optimization container -- `K`: constraint type (caller provides) -- `C`: component type -- `name`: component name -- `t`: time period -- `pwl_vars`: vector of PWL delta variables -- `on_status`: the on/off status (1.0, or a JuMP variable/parameter) -""" -function add_pwl_normalization_constraint!( - container::OptimizationContainer, - ::Type{K}, - ::Type{C}, - name::String, - t::Int, - pwl_vars::Vector{JuMP.VariableRef}, - on_status::JuMPOrFloat, -) where {K <: ConstraintType, C <: IS.InfrastructureSystemsComponent} - # Create sparse container with (name, time) indexing if it doesn't exist - if !has_container_key(container, K, C) - con_key = ConstraintKey(K, C) - contents = Dict{Tuple{String, Int}, Union{Nothing, JuMP.ConstraintRef}}() - _assign_container!( - container.constraints, - con_key, - JuMP.Containers.SparseAxisArray(contents), - ) - end - con_container = get_constraint(container, K(), C) - jump_model = get_jump_model(container) - con_container[name, t] = JuMP.@constraint( - jump_model, - sum(pwl_vars) == on_status - ) - return -end - -""" -Add SOS2 constraint for PWL variables (required for non-convex curves). - -# Arguments -- `container`: the optimization container -- `C`: component type -- `name`: component name -- `t`: time period -- `pwl_vars`: vector of PWL delta variables -""" -function add_pwl_sos2_constraint!( - container::OptimizationContainer, - ::Type{C}, - name::String, - t::Int, - pwl_vars::Vector{JuMP.VariableRef}, -) where {C <: IS.InfrastructureSystemsComponent} - jump_model = get_jump_model(container) - n_points = length(pwl_vars) - JuMP.@constraint(jump_model, pwl_vars in MOI.SOS2(collect(1:n_points))) - return -end - -""" -Compute PWL cost expression from delta variables and slopes. - -Returns the cost expression without adding it to the objective (caller decides -whether to use invariant or variant). 
- - cost = Σ δ[i] * slope[i] * multiplier - -# Arguments -- `pwl_vars`: vector of PWL delta variables -- `slopes`: vector of slope values (cost per segment, already normalized) -- `multiplier`: additional multiplier (e.g., dt for time resolution) - -# Returns -JuMP affine expression representing the cost. -""" -function get_pwl_cost_expression( - pwl_vars::Vector{JuMP.VariableRef}, # PERF allow views? - slopes::Vector{Float64}, - multiplier::Float64, -) - @assert length(pwl_vars) == length(slopes) - cost = JuMP.AffExpr(0.0) - for (i, slope) in enumerate(slopes) - JuMP.add_to_expression!(cost, slope * multiplier, pwl_vars[i]) - end - return cost -end - -""" -Add block-offer PWL constraints: linking constraint and per-block upper bounds. - - power_var == Σ δ[k] + min_power_offset - δ[k] <= breakpoints[k+1] - breakpoints[k] for each k - -# Arguments -- `jump_model`: the JuMP model -- `con_container`: constraint container to store the linking constraint (indexed [name, t]) -- `name`: component name -- `t`: time period -- `power_var`: the power variable being linked -- `pwl_vars`: vector of block-offer delta variables (length = n_blocks) -- `breakpoints`: vector of breakpoint values (length = n_blocks + 1) -- `min_power_offset`: offset for minimum generation power (default 0.0) -""" -function add_pwl_block_offer_constraints!( - jump_model::JuMP.Model, - con_container, - name::String, - t::Int, - power_var::JuMPOrFloat, - pwl_vars::Vector{JuMP.VariableRef}, - breakpoints::Vector{<:JuMPOrFloat}, - min_power_offset::JuMPOrFloat = 0.0, -) - @assert length(pwl_vars) == length(breakpoints) - 1 - sum_pwl = sum(pwl_vars) + min_power_offset - con_container[name, t] = JuMP.@constraint(jump_model, power_var == sum_pwl) - for (ix, var) in enumerate(pwl_vars) - JuMP.@constraint(jump_model, var <= breakpoints[ix + 1] - breakpoints[ix]) - end - return -end diff --git a/src/objective_function/market_bid.jl b/src/objective_function/market_bid.jl index 4333d79..69c665e 100644 --- 
a/src/objective_function/market_bid.jl +++ b/src/objective_function/market_bid.jl @@ -466,80 +466,6 @@ function process_market_bid_parameters!( end end -################################################################################# -# Section 8: Min-Gen-Power Dispatch Defaults -# POM overrides these for specific device types and formulations. -################################################################################# - -_include_min_gen_power_in_constraint( - ::Type, - ::VariableType, - ::AbstractDeviceFormulation, -) = false - -_include_constant_min_gen_power_in_constraint( - ::Type, - ::VariableType, - ::AbstractDeviceFormulation, -) = false - -################################################################################# -# Section 9: PWL Block Offer Constraints (generic) -# The ReserveDemandCurve-specific overload is in POM. -################################################################################# - -""" -Implement the constraints for PWL Block Offer variables. 
That is: - -```math -\\sum_{k\\in\\mathcal{K}} \\delta_{k,t} = p_t \\\\ -\\sum_{k\\in\\mathcal{K}} \\delta_{k,t} <= P_{k+1,t}^{max} - P_{k,t}^{max} -``` -""" -function _add_pwl_constraint!( - container::OptimizationContainer, - component::T, - ::U, - ::D, - break_points::Vector{<:JuMPOrFloat}, - pwl_vars::Vector{JuMP.VariableRef}, - period::Int, - ::Type{W}, -) where {T <: PSY.Component, U <: VariableType, - D <: AbstractDeviceFormulation, - W <: AbstractPiecewiseLinearBlockOfferConstraint} - variables = get_variable(container, U(), T) - const_container = lazy_container_addition!( - container, - W(), - T, - axes(variables)..., - ) - name = PSY.get_name(component) - - min_power_offset = if _include_constant_min_gen_power_in_constraint(T, U(), D()) - jump_fixed_value(first(break_points))::Float64 - elseif _include_min_gen_power_in_constraint(T, U(), D()) - p1::Float64 = jump_fixed_value(first(break_points)) - on_vars = get_variable(container, OnVariable(), T) - p1 * on_vars[name, period] - else - 0.0 - end - - add_pwl_block_offer_constraints!( - get_jump_model(container), - const_container, - name, - period, - variables[name, period], - pwl_vars, - break_points, - min_power_offset, - ) - return -end - ################################################################################# # Section 10: PWL Data Retrieval ################################################################################# @@ -598,8 +524,25 @@ end ################################################################################# """ -Add PWL cost terms for data coming from a MarketBidCost / ImportExportCost -with offer curves, dispatched on OfferDirection. +Add PWL cost terms using the **delta (incremental/block-offer) formulation**. + +Given an offer curve with breakpoints ``P_0, P_1, \\ldots, P_n`` and slopes +``m_1, m_2, \\ldots, m_n``, this function: + +1. 
Creates delta variables ``\\delta_k \\geq 0`` for each segment via [`add_pwl_variables!`](@ref), + with no upper bound (block sizes are enforced by constraints). +2. Adds linking and block-size constraints via [`_add_pwl_constraint!`](@ref): + ``p = \\sum_k \\delta_k`` and ``\\delta_k \\leq P_{k+1} - P_k``. +3. Builds the cost expression ``C = \\sum_k m_k \\, \\delta_k`` via [`get_pwl_cost_expression`](@ref). + +For convex offer curves (``m_1 \\leq m_2 \\leq \\cdots \\leq m_n``), no SOS2 or binary +variables are needed — the optimizer fills cheap segments first automatically. + +Dispatches on `OfferDirection` (incremental or decremental) to select the appropriate +variable and constraint types. + +See also: [`_add_pwl_term!`](@ref) for the lambda (convex combination) formulation used by +`CostCurve{PiecewisePointCurve}`. """ function add_pwl_term!( dir::OfferDirection, diff --git a/src/objective_function/objective_function_pwl_delta.jl b/src/objective_function/objective_function_pwl_delta.jl new file mode 100644 index 0000000..24eb945 --- /dev/null +++ b/src/objective_function/objective_function_pwl_delta.jl @@ -0,0 +1,213 @@ +################################################## +# PWL Delta (Incremental/Block) Formulation +# +# Pure formulation math for the delta/incremental PWL method. +# Variables δ_k >= 0 with block width bounds, +# P = Σ δ_k + offset, cost = Σ δ_k * slope_k. +# +# Cost-data-specific mapping (OfferCurveCost → slopes/breakpoints) +# stays in market_bid.jl. +################################################## + +################################################## +############## PWL Delta Variables ############### +################################################## + +""" +Create PWL delta variables for a component at a given time period. + +Creates `n_points` variables with specified bounds. 
+ +# Arguments +- `container`: the optimization container +- `V`: variable type for the delta variables (caller provides) +- `C`: component type +- `name`: component name +- `t`: time period +- `n_points`: number of PWL points (= number of delta variables) +- `upper_bound`: upper bound for variables (default 1.0 for convex combination formulation; + use `Inf` for block offer formulation) + +# Returns +Vector of the created JuMP variables. +""" +function add_pwl_variables!( + container::OptimizationContainer, + ::Type{V}, + ::Type{C}, + name::String, + t::Int, + n_points::Int; + upper_bound::Float64 = 1.0, +) where {V <: SparseVariableType, C <: IS.InfrastructureSystemsComponent} + # SparseVariableType dispatch automatically creates container with (String, Int, Int) keys + # axes are (name, pwl_index, time_step). + var_container = lazy_container_addition!(container, V(), C) + pwl_vars = Vector{JuMP.VariableRef}(undef, n_points) + jump_model = get_jump_model(container) + for i in 1:n_points + if isfinite(upper_bound) + pwl_vars[i] = + var_container[(name, i, t)] = JuMP.@variable( + jump_model, + base_name = "$(V)_$(C)_{$(name), pwl_$(i), $(t)}", + lower_bound = 0.0, + upper_bound = upper_bound, + ) + else + pwl_vars[i] = + var_container[(name, i, t)] = JuMP.@variable( + jump_model, + base_name = "$(V)_$(C)_{$(name), pwl_$(i), $(t)}", + lower_bound = 0.0, + ) + end + end + return pwl_vars +end + +################################################## +############## PWL Delta Expressions ############# +################################################## + +""" +Compute PWL cost expression from delta variables and slopes. + +Returns the cost expression without adding it to the objective (caller decides +whether to use invariant or variant). 
+ + cost = Σ δ[i] * slope[i] * multiplier + +# Arguments +- `pwl_vars`: vector of PWL delta variables +- `slopes`: vector of slope values (cost per segment, already normalized) +- `multiplier`: additional multiplier (e.g., dt for time resolution) + +# Returns +JuMP affine expression representing the cost. +""" +function get_pwl_cost_expression( + pwl_vars::AbstractVector{JuMP.VariableRef}, + slopes::AbstractVector{Float64}, + multiplier::Float64, +) + @assert length(pwl_vars) == length(slopes) + cost = JuMP.AffExpr(0.0) + for (i, slope) in enumerate(slopes) + JuMP.add_to_expression!(cost, slope * multiplier, pwl_vars[i]) + end + return cost +end + +################################################## +############ PWL Block Offer Constraints ######### +################################################## + +""" +Add block-offer PWL constraints: linking constraint and per-block upper bounds. + + power_var == Σ δ[k] + min_power_offset + δ[k] <= breakpoints[k+1] - breakpoints[k] for each k + +# Arguments +- `jump_model`: the JuMP model +- `con_container`: constraint container to store the linking constraint (indexed [name, t]) +- `name`: component name +- `t`: time period +- `power_var`: the power variable being linked +- `pwl_vars`: vector of block-offer delta variables (length = n_blocks) +- `breakpoints`: vector of breakpoint values (length = n_blocks + 1) +- `min_power_offset`: offset for minimum generation power (default 0.0) +""" +function add_pwl_block_offer_constraints!( + jump_model::JuMP.Model, + con_container, + name::String, + t::Int, + power_var::JuMPOrFloat, + pwl_vars::Vector{JuMP.VariableRef}, + breakpoints::Vector{<:JuMPOrFloat}, + min_power_offset::JuMPOrFloat = 0.0, +) + @assert length(pwl_vars) == length(breakpoints) - 1 + sum_pwl = sum(pwl_vars) + min_power_offset + con_container[name, t] = JuMP.@constraint(jump_model, power_var == sum_pwl) + for (ix, var) in enumerate(pwl_vars) + JuMP.@constraint(jump_model, var <= breakpoints[ix + 1] - 
breakpoints[ix])
+ end
+ return
+end
+
+##################################################
+# Min-Gen-Power Dispatch Defaults
+# POM overrides these for specific device types and formulations.
+##################################################
+
+_include_min_gen_power_in_constraint(
+ ::Type,
+ ::VariableType,
+ ::AbstractDeviceFormulation,
+) = false
+
+_include_constant_min_gen_power_in_constraint(
+ ::Type,
+ ::VariableType,
+ ::AbstractDeviceFormulation,
+) = false
+
+##################################################
+# PWL Block Offer Constraint Wrapper
+# The ReserveDemandCurve-specific overload is in POM.
+##################################################
+
+"""
+Implement the constraints for PWL Block Offer variables. That is:
+
+```math
+\\sum_{k\\in\\mathcal{K}} \\delta_{k,t} = p_t \\\\
+\\delta_{k,t} \\leq P_{k+1,t}^{max} - P_{k,t}^{max} \\quad \\forall k \\in \\mathcal{K}
+```
+"""
+function _add_pwl_constraint!(
+ container::OptimizationContainer,
+ component::T,
+ ::U,
+ ::D,
+ break_points::Vector{<:JuMPOrFloat},
+ pwl_vars::Vector{JuMP.VariableRef},
+ period::Int,
+ ::Type{W},
+) where {T <: PSY.Component, U <: VariableType,
+ D <: AbstractDeviceFormulation,
+ W <: AbstractPiecewiseLinearBlockOfferConstraint}
+ variables = get_variable(container, U(), T)
+ const_container = lazy_container_addition!(
+ container,
+ W(),
+ T,
+ axes(variables)...,
+ )
+ name = PSY.get_name(component)
+
+ min_power_offset = if _include_constant_min_gen_power_in_constraint(T, U(), D())
+ jump_fixed_value(first(break_points))::Float64
+ elseif _include_min_gen_power_in_constraint(T, U(), D())
+ p1::Float64 = jump_fixed_value(first(break_points))
+ on_vars = get_variable(container, OnVariable(), T)
+ p1 * on_vars[name, period]
+ else
+ 0.0
+ end
+
+ add_pwl_block_offer_constraints!(
+ get_jump_model(container),
+ const_container,
+ name,
+ period,
+ variables[name, period],
+ pwl_vars,
+ break_points,
+ min_power_offset,
+ )
+ return
+end
diff --git 
a/src/objective_function/objective_function_pwl_lambda.jl b/src/objective_function/objective_function_pwl_lambda.jl new file mode 100644 index 0000000..e396f1c --- /dev/null +++ b/src/objective_function/objective_function_pwl_lambda.jl @@ -0,0 +1,395 @@ +################################################## +# PWL Lambda (Convex Combination) Formulation +# +# Pure formulation math for the lambda/convex-combination PWL method. +# Variables δ_i ∈ [0,1], Σ δ_i = on_status, P = Σ δ_i * breakpoint_i, +# cost = Σ δ_i * y_i. +# +# Cost-data-specific mapping (CostCurve/FuelCurve → PiecewiseLinearData) +# stays in piecewise_linear.jl. +################################################## + +################################################## +################# SOS Methods #################### +################################################## + +# might belong in POM, but here for now. +abstract type VariableValueParameter <: RightHandSideParameter end +""" +Parameter to define unit commitment status updated from the system state +""" +struct OnStatusParameter <: VariableValueParameter end + +""" +Parameter to fix a variable value (e.g., from feedforward). +""" +struct FixValueParameter <: VariableValueParameter end + +""" +Struct to create the PiecewiseLinearCostConstraint associated with a specified variable. + +See the piecewise linear cost functions section for more information. +""" +struct PiecewiseLinearCostConstraint <: ConstraintType end + +""" +Normalization constraint for PWL cost: sum of delta variables equals on-status. +""" +struct PiecewiseLinearCostNormalizationConstraint <: ConstraintType end + +# mildly device-specific, but a single line of code. (Really dispatching on formulation here: +# device is just for safety. Shouldn't have non-thermal with thermal formulation.) 
+_sos_status(::Type{<:IS.InfrastructureSystemsComponent}, ::AbstractDeviceFormulation) =
+ SOSStatusVariable.NO_VARIABLE
+_sos_status(::Type{<:PSY.ThermalGen}, ::AbstractThermalUnitCommitment) =
+ SOSStatusVariable.VARIABLE
+
+function _get_sos_value(
+ container::OptimizationContainer,
+ ::Type{V},
+ component::T,
+) where {T <: IS.InfrastructureSystemsComponent, V <: AbstractDeviceFormulation}
+ if skip_proportional_cost(component)
+ return SOSStatusVariable.NO_VARIABLE
+ elseif has_container_key(container, OnStatusParameter, T)
+ return SOSStatusVariable.PARAMETER
+ else
+ return _sos_status(T, V())
+ end
+end
+
+_get_sos_value(
+ ::OptimizationContainer,
+ ::Type{<:IS.InfrastructureSystemsComponent},
+ ::AbstractServiceFormulation,
+) = SOSStatusVariable.NO_VARIABLE
+
+##################################################
+################# PWL Variables ##################
+##################################################
+
+# This case bounds each variable between 0 and 1
+function _add_pwl_variables!(
+ container::OptimizationContainer,
+ ::Type{T},
+ component_name::String,
+ time_period::Int,
+ cost_data::IS.PiecewiseLinearData,
+) where {T <: IS.InfrastructureSystemsComponent}
+ var_container = lazy_container_addition!(container, PiecewiseLinearCostVariable(), T)
+ # length(PiecewiseStepData) gets number of segments, here we want number of points
+ pwlvars = Array{JuMP.VariableRef}(undef, length(cost_data) + 1)
+ for i in 1:(length(cost_data) + 1)
+ pwlvars[i] =
+ var_container[component_name, i, time_period] = JuMP.@variable(
+ get_jump_model(container),
+ base_name = "PiecewiseLinearCostVariable_$(component_name)_{pwl_$(i), $time_period}",
+ lower_bound = 0.0,
+ upper_bound = 1.0
+ )
+ end
+ return pwlvars
+end
+
+##################################################
+################# PWL Constraints ################
+##################################################
+
+function _determine_bin_lhs(
+ container::OptimizationContainer,
+ sos_status::SOSStatusVariable, 
+ ::Type{T},
+ name::String,
+ period::Int) where {T <: IS.InfrastructureSystemsComponent}
+ if sos_status == SOSStatusVariable.NO_VARIABLE
+ @debug "Using Piecewise Linear cost function but no variable/parameter ref for ON status is passed. Default status will be set to online (1.0)" _group =
+ LOG_GROUP_COST_FUNCTIONS
+ return 1.0
+ elseif sos_status == SOSStatusVariable.PARAMETER
+ @debug "Using Piecewise Linear cost function with parameter OnStatusParameter, $T" _group =
+ LOG_GROUP_COST_FUNCTIONS
+ param = OnStatusParameter()
+ return get_parameter(container, param, T).parameter_array[name, period]
+ elseif sos_status == SOSStatusVariable.VARIABLE
+ @debug "Using Piecewise Linear cost function with variable OnVariable $T" _group =
+ LOG_GROUP_COST_FUNCTIONS
+ var = OnVariable()
+ return get_variable(container, var, T)[name, period]
+ else
+ @assert false
+ end
+end
+
+# Migration note for POM:
+# Old call: _add_pwl_constraint!(container, component, U(), break_points, sos_val, t)
+# New call for standard form:
+# power_var = get_variable(container, U(), T)[name, t]
+# _add_pwl_constraint_standard!(container, component, break_points, sos_val, t, power_var)
+# New call for compact form (PowerAboveMinimumVariable):
+# power_var = get_variable(container, U(), T)[name, t]
+# P_min = get_active_power_limits(component).min
+# _add_pwl_constraint_compact!(container, component, name, break_points, sos_val, t, power_var, P_min)
+
+"""
+Implement the standard constraints for PWL variables. That is:
+
+```math
+\\sum_{k\\in\\mathcal{K}} P_k^{max} \\delta_{k,t} = p_t \\\\
+\\sum_{k\\in\\mathcal{K}} \\delta_{k,t} = on_t
+```
+
+For compact form (PowerAboveMinimumVariable), use `_add_pwl_constraint_compact!` instead. 
+""" +function _add_pwl_constraint_standard!( + container::OptimizationContainer, + component::T, + break_points::Vector{Float64}, + sos_status::SOSStatusVariable, + period::Int, + power_var::JuMP.VariableRef, + must_run::Bool = false, +) where {T <: IS.InfrastructureSystemsComponent} + name = get_name(component) + n_points = length(break_points) + + # Get PWL delta variables + pwl_var_container = get_variable(container, PiecewiseLinearCostVariable(), T) + pwl_vars = [pwl_var_container[name, i, period] for i in 1:n_points] + + # Linking constraint: power_var == sum(pwl_vars * breakpoints) + add_pwl_linking_constraint!( + container, + PiecewiseLinearCostConstraint, + T, + name, + period, + power_var, + pwl_vars, + break_points, + ) + + # Normalization constraint: sum(pwl_vars) == on_status + if must_run + bin = 1.0 + else + bin = _determine_bin_lhs(container, sos_status, T, name, period) + end + add_pwl_normalization_constraint!( + container, + PiecewiseLinearCostNormalizationConstraint, + T, + name, + period, + pwl_vars, + bin, + ) + return +end + +""" +Implement the constraints for PWL variables for Compact form. That is: + +```math +\\sum_{k\\in\\mathcal{K}} P_k^{max} \\delta_{k,t} = p_t + P_{\\min} \\cdot u_t \\\\ +\\sum_{k\\in\\mathcal{K}} \\delta_{k,t} = on_t +``` + +For standard form, use `_add_pwl_constraint_standard!` instead. 
+""" +function _add_pwl_constraint_compact!( + container::OptimizationContainer, + ::T, + name::String, + break_points::Vector{Float64}, + sos_status::SOSStatusVariable, + period::Int, + power_var::JuMP.VariableRef, + P_min::Float64, + must_run::Bool = false, +) where {T <: IS.InfrastructureSystemsComponent} + n_points = length(break_points) + + # Get on-status for compact form (needed for both linking and normalization) + if must_run + bin = 1.0 + else + bin = _determine_bin_lhs(container, sos_status, T, name, period) + end + + # Get PWL delta variables + pwl_var_container = get_variable(container, PiecewiseLinearCostVariable(), T) + pwl_vars = [pwl_var_container[name, i, period] for i in 1:n_points] + + # Create constraint container if needed + if !has_container_key(container, PiecewiseLinearCostConstraint, T) + con_key = ConstraintKey(PiecewiseLinearCostConstraint, T) + contents = Dict{Tuple{String, Int}, Union{Nothing, JuMP.ConstraintRef}}() + _assign_container!( + container.constraints, + con_key, + JuMP.Containers.SparseAxisArray(contents), + ) + end + con_container = get_constraint(container, PiecewiseLinearCostConstraint(), T) + jump_model = get_jump_model(container) + + # Compact form linking constraint includes P_min offset + con_container[name, period] = JuMP.@constraint( + jump_model, + bin * P_min + power_var == + sum(pwl_vars[i] * break_points[i] for i in 1:n_points) + ) + + # Normalization constraint: sum(pwl_vars) == on_status + add_pwl_normalization_constraint!( + container, + PiecewiseLinearCostNormalizationConstraint, + T, + name, + period, + pwl_vars, + bin, + ) + return +end + +################################################## +################ PWL Expressions ################# +################################################## + +# accepts scaled function data. 
+function _get_pwl_cost_expression( + container::OptimizationContainer, + ::Type{T}, + name::String, + time_period::Int, + cost_data::IS.PiecewiseLinearData, + multiplier::Float64, +) where {T <: IS.InfrastructureSystemsComponent} + pwl_var_container = get_variable(container, PiecewiseLinearCostVariable(), T) + gen_cost = JuMP.AffExpr(0.0) + y_coords_cost_data = IS.get_y_coords(cost_data) + for (i, cost) in enumerate(y_coords_cost_data) + JuMP.add_to_expression!( + gen_cost, + (cost * multiplier), + pwl_var_container[name, i, time_period], + ) + end + return gen_cost +end + +################################################## +# Lambda PWL Constraint Helpers +# (linking, normalization, SOS2) +################################################## + +""" +Add PWL linking constraint: power variable equals weighted sum of breakpoints. + + P[name, t] == Σ δ[i] * breakpoint[i] + +# Arguments +- `container`: the optimization container +- `K`: constraint type (caller provides) +- `C`: component type +- `name`: component name +- `t`: time period +- `power_var`: the power variable to link (JuMP variable reference) +- `pwl_vars`: vector of PWL delta variables +- `breakpoints`: vector of breakpoint values (in p.u.) 
+""" +function add_pwl_linking_constraint!( + container::OptimizationContainer, + ::Type{K}, + ::Type{C}, + name::String, + t::Int, + power_var::JuMP.VariableRef, + pwl_vars::Vector{JuMP.VariableRef}, + breakpoints::Vector{Float64}, +) where {K <: ConstraintType, C <: IS.InfrastructureSystemsComponent} + @assert length(pwl_vars) == length(breakpoints) + # Create sparse container with (name, time) indexing if it doesn't exist + if !has_container_key(container, K, C) + con_key = ConstraintKey(K, C) + contents = Dict{Tuple{String, Int}, Union{Nothing, JuMP.ConstraintRef}}() + _assign_container!( + container.constraints, + con_key, + JuMP.Containers.SparseAxisArray(contents), + ) + end + con_container = get_constraint(container, K(), C) + jump_model = get_jump_model(container) + con_container[name, t] = JuMP.@constraint( + jump_model, + power_var == sum(pwl_vars[i] * breakpoints[i] for i in eachindex(breakpoints)) + ) + return +end + +""" +Add PWL normalization constraint: delta variables sum to on_status. 
+ + Σ δ[i] == on_status + +# Arguments +- `container`: the optimization container +- `K`: constraint type (caller provides) +- `C`: component type +- `name`: component name +- `t`: time period +- `pwl_vars`: vector of PWL delta variables +- `on_status`: the on/off status (1.0, or a JuMP variable/parameter) +""" +function add_pwl_normalization_constraint!( + container::OptimizationContainer, + ::Type{K}, + ::Type{C}, + name::String, + t::Int, + pwl_vars::Vector{JuMP.VariableRef}, + on_status::JuMPOrFloat, +) where {K <: ConstraintType, C <: IS.InfrastructureSystemsComponent} + # Create sparse container with (name, time) indexing if it doesn't exist + if !has_container_key(container, K, C) + con_key = ConstraintKey(K, C) + contents = Dict{Tuple{String, Int}, Union{Nothing, JuMP.ConstraintRef}}() + _assign_container!( + container.constraints, + con_key, + JuMP.Containers.SparseAxisArray(contents), + ) + end + con_container = get_constraint(container, K(), C) + jump_model = get_jump_model(container) + con_container[name, t] = JuMP.@constraint( + jump_model, + sum(pwl_vars) == on_status + ) + return +end + +""" +Add SOS2 constraint for PWL variables (required for non-convex curves). 
+ +# Arguments +- `container`: the optimization container +- `C`: component type +- `name`: component name +- `t`: time period +- `pwl_vars`: vector of PWL delta variables +""" +function add_pwl_sos2_constraint!( + container::OptimizationContainer, + ::Type{C}, + name::String, + t::Int, + pwl_vars::Vector{JuMP.VariableRef}, +) where {C <: IS.InfrastructureSystemsComponent} + jump_model = get_jump_model(container) + n_points = length(pwl_vars) + JuMP.@constraint(jump_model, pwl_vars in MOI.SOS2(collect(1:n_points))) + return +end diff --git a/src/objective_function/piecewise_linear.jl b/src/objective_function/piecewise_linear.jl index 2da58ea..bf929d1 100644 --- a/src/objective_function/piecewise_linear.jl +++ b/src/objective_function/piecewise_linear.jl @@ -1,273 +1,7 @@ ################################################## -################# SOS Methods #################### +######## CostCurve/FuelCurve → Lambda PWL ######## ################################################## -# might belong in POM, but here for now. -abstract type VariableValueParameter <: RightHandSideParameter end -""" -Parameter to define unit commitment status updated from the system state -""" -struct OnStatusParameter <: VariableValueParameter end - -""" -Parameter to fix a variable value (e.g., from feedforward). -""" -struct FixValueParameter <: VariableValueParameter end - -""" -Struct to create the PiecewiseLinearCostConstraint associated with a specified variable. - -See the piecewise linear cost functions section for more information. -""" -struct PiecewiseLinearCostConstraint <: ConstraintType end - -""" -Normalization constraint for PWL cost: sum of delta variables equals on-status. -""" -struct PiecewiseLinearCostNormalizationConstraint <: ConstraintType end - -# mildly device-specific, but a single line of code. (Really dispatching on formulation here: -# device is just for safety. Shouldn't have non-thermal with thermal formulation.) 
-_sos_status(::Type{<:IS.InfrastructureSystemsComponent}, ::AbstractDeviceFormulation) = - SOSStatusVariable.NO_VARIABLE -_sos_status(::Type{<:PSY.ThermalGen}, ::AbstractThermalUnitCommitment) = - SOSStatusVariable.VARIABLE - -function _get_sos_value( - container::OptimizationContainer, - ::Type{V}, - component::T, -) where {T <: IS.InfrastructureSystemsComponent, V <: AbstractDeviceFormulation} - if skip_proportional_cost(component) - return SOSStatusVariable.NO_VARIABLE - elseif has_container_key(container, OnStatusParameter, T) - return SOSStatusVariable.PARAMETER - else - return _sos_status(T, V()) - end -end - -_get_sos_value( - ::OptimizationContainer, - ::Type{<:IS.InfrastructureSystemsComponent}, - ::AbstractServiceFormulation, -) = SOSStatusVariable.NO_VARIABLE - -################################################## -################# PWL Variables ################## -################################################## - -# This cases bounds the data by 1 - 0 -function _add_pwl_variables!( - container::OptimizationContainer, - ::Type{T}, - component_name::String, - time_period::Int, - cost_data::IS.PiecewiseLinearData, -) where {T <: IS.InfrastructureSystemsComponent} - var_container = lazy_container_addition!(container, PiecewiseLinearCostVariable(), T) - # length(PiecewiseStepData) gets number of segments, here we want number of points - pwlvars = Array{JuMP.VariableRef}(undef, length(cost_data) + 1) - for i in 1:(length(cost_data) + 1) - pwlvars[i] = - var_container[component_name, i, time_period] = JuMP.@variable( - get_jump_model(container), - base_name = "PiecewiseLinearCostVariable_$(component_name)_{pwl_$(i), $time_period}", - lower_bound = 0.0, - upper_bound = 1.0 - ) - end - return pwlvars -end - -################################################## -################# PWL Constraints ################ -################################################## - -function _determine_bin_lhs( - container::OptimizationContainer, - sos_status::SOSStatusVariable, 
- ::Type{T}, - name::String, - period::Int) where {T <: IS.InfrastructureSystemsComponent} - if sos_status == SOSStatusVariable.NO_VARIABLE - return 1.0 - @debug "Using Piecewise Linear cost function but no variable/parameter ref for ON status is passed. Default status will be set to online (1.0)" _group = - LOG_GROUP_COST_FUNCTIONS - elseif sos_status == SOSStatusVariable.PARAMETER - param = OnStatusParameter() - return get_parameter(container, param, T).parameter_array[name, period] - @debug "Using Piecewise Linear cost function with parameter OnStatusParameter, $T" _group = - LOG_GROUP_COST_FUNCTIONS - elseif sos_status == SOSStatusVariable.VARIABLE - var = OnVariable() - return get_variable(container, var, T)[name, period] - @debug "Using Piecewise Linear cost function with variable OnVariable $T" _group = - LOG_GROUP_COST_FUNCTIONS - else - @assert false - end -end - -# Migration note for POM: -# Old call: _add_pwl_constraint!(container, component, U(), break_points, sos_val, t) -# New call for standard form: -# power_var = get_variable(container, U(), T)[name, t] -# _add_pwl_constraint_standard!(container, component, break_points, sos_val, t, power_var) -# New call for compact form (PowerAboveMinimumVariable): -# power_var = get_variable(container, U(), T)[name, t] -# P_min = get_active_power_limits(component).min -# _add_pwl_constraint_compact!(container, component, break_points, sos_val, t, power_var, P_min) - -""" -Implement the standard constraints for PWL variables. That is: - -```math -\\sum_{k\\in\\mathcal{K}} P_k^{max} \\delta_{k,t} = p_t \\\\ -\\sum_{k\\in\\mathcal{K}} \\delta_{k,t} = on_t -``` - -For compact form (PowerAboveMinimumVariable), use `_add_pwl_constraint_compact!` instead. 
-""" -function _add_pwl_constraint_standard!( - container::OptimizationContainer, - component::T, - break_points::Vector{Float64}, - sos_status::SOSStatusVariable, - period::Int, - power_var::JuMP.VariableRef, - must_run::Bool = false, -) where {T <: IS.InfrastructureSystemsComponent} - name = get_name(component) - n_points = length(break_points) - - # Get PWL delta variables - pwl_var_container = get_variable(container, PiecewiseLinearCostVariable(), T) - pwl_vars = [pwl_var_container[name, i, period] for i in 1:n_points] - - # Linking constraint: power_var == sum(pwl_vars * breakpoints) - add_pwl_linking_constraint!( - container, - PiecewiseLinearCostConstraint, - T, - name, - period, - power_var, - pwl_vars, - break_points, - ) - - # Normalization constraint: sum(pwl_vars) == on_status - if must_run - bin = 1.0 - else - bin = _determine_bin_lhs(container, sos_status, T, name, period) - end - add_pwl_normalization_constraint!( - container, - PiecewiseLinearCostNormalizationConstraint, - T, - name, - period, - pwl_vars, - bin, - ) - return -end - -""" -Implement the constraints for PWL variables for Compact form. That is: - -```math -\\sum_{k\\in\\mathcal{K}} P_k^{max} \\delta_{k,t} = p_t + P_min * u_t \\\\ -\\sum_{k\\in\\mathcal{K}} \\delta_{k,t} = on_t -``` - -For standard form, use `_add_pwl_constraint_standard!` instead. 
-""" -function _add_pwl_constraint_compact!( - container::OptimizationContainer, - ::T, - name::String, - break_points::Vector{Float64}, - sos_status::SOSStatusVariable, - period::Int, - power_var::JuMP.VariableRef, - P_min::Float64, - must_run::Bool = false, -) where {T <: IS.InfrastructureSystemsComponent} - n_points = length(break_points) - - # Get on-status for compact form (needed for both linking and normalization) - if must_run - bin = 1.0 - else - bin = _determine_bin_lhs(container, sos_status, T, name, period) - end - - # Get PWL delta variables - pwl_var_container = get_variable(container, PiecewiseLinearCostVariable(), T) - pwl_vars = [pwl_var_container[name, i, period] for i in 1:n_points] - - # Create constraint container if needed - if !has_container_key(container, PiecewiseLinearCostConstraint, T) - con_key = ConstraintKey(PiecewiseLinearCostConstraint, T) - contents = Dict{Tuple{String, Int}, Union{Nothing, JuMP.ConstraintRef}}() - _assign_container!( - container.constraints, - con_key, - JuMP.Containers.SparseAxisArray(contents), - ) - end - con_container = get_constraint(container, PiecewiseLinearCostConstraint(), T) - jump_model = get_jump_model(container) - - # Compact form linking constraint includes P_min offset - con_container[name, period] = JuMP.@constraint( - jump_model, - bin * P_min + power_var == - sum(pwl_vars[i] * break_points[i] for i in 1:n_points) - ) - - # Normalization constraint: sum(pwl_vars) == on_status - add_pwl_normalization_constraint!( - container, - PiecewiseLinearCostNormalizationConstraint, - T, - name, - period, - pwl_vars, - bin, - ) - return -end - -################################################## -################ PWL Expressions ################# -################################################## - -# accepts scaled function data. 
-function _get_pwl_cost_expression( - container::OptimizationContainer, - ::Type{T}, - name::String, - time_period::Int, - cost_data::IS.PiecewiseLinearData, - multiplier::Float64, -) where {T <: IS.InfrastructureSystemsComponent} - pwl_var_container = get_variable(container, PiecewiseLinearCostVariable(), T) - gen_cost = JuMP.AffExpr(0.0) - y_coords_cost_data = IS.get_y_coords(cost_data) - for (i, cost) in enumerate(y_coords_cost_data) - JuMP.add_to_expression!( - gen_cost, - (cost * multiplier), - pwl_var_container[name, i, time_period], - ) - end - return gen_cost -end - _get_pwl_cost_multiplier(::IS.FuelCurve{IS.PiecewisePointCurve}, ::VariableType, ::AbstractDeviceFormulation, @@ -326,7 +60,21 @@ end ################################################## """ -Add PWL cost terms for data coming from a PiecewisePointCurve +Add PWL cost terms using the **lambda (convex combination) formulation**. + +Given a `PiecewisePointCurve` with breakpoints ``(P_i, C_i)``, this function: + +1. Creates lambda variables ``\\lambda_i \\in [0, 1]`` at each breakpoint via [`_add_pwl_variables!`](@ref). +2. Adds linking and normalization constraints via [`_add_pwl_constraint_standard!`](@ref). +3. If the cost curve is **non-convex**, adds an SOS2 adjacency constraint so that at most + two neighboring ``\\lambda`` values are nonzero. +4. Builds the cost expression ``C = \\sum_i \\lambda_i \\, C(P_i)``. + +Returns a vector of cost expressions, one per time step, which the caller adds to the +objective function. + +See also: [`add_pwl_term!`](@ref) for the delta (block-offer) formulation used by +`MarketBidCost`. """ function _add_pwl_term!( container::OptimizationContainer, @@ -358,14 +106,12 @@ function _add_pwl_term!( device_base_power, ) - if all(iszero.((point -> point.y).(IS.get_points(data)))) # TODO I think this should have been first. before? 
+ if all(iszero.((point -> point.y).(IS.get_points(data)))) @debug "All cost terms for component $(name) are 0.0" _group = LOG_GROUP_COST_FUNCTIONS return end - # Compact PWL data does not exists anymore - cost_is_convex = IS.is_convex(data) if !cost_is_convex @warn( @@ -377,20 +123,20 @@ function _add_pwl_term!( time_steps = get_time_steps(container) pwl_cost_expressions = Vector{JuMP.AffExpr}(undef, time_steps[end]) sos_val = _get_sos_value(container, V, component) + power_variables = get_variable(container, U(), T) + n_points = length(break_points) for t in time_steps _add_pwl_variables!(container, T, name, t, data) - power_var = get_variable(container, U(), T)[name, t] _add_pwl_constraint_standard!( container, component, break_points, sos_val, t, - power_var, + power_variables[name, t], ) if !cost_is_convex pwl_var_container = get_variable(container, PiecewiseLinearCostVariable(), T) - n_points = length(break_points) pwl_vars = [pwl_var_container[name, i, t] for i in 1:n_points] add_pwl_sos2_constraint!(container, T, name, t, pwl_vars) end @@ -426,16 +172,9 @@ function add_variable_cost_to_objective!( } component_name = get_name(component) @debug "PWL Variable Cost" _group = LOG_GROUP_COST_FUNCTIONS component_name - # If array is full of tuples with zeros return 0.0 - value_curve = IS.get_value_curve(cost_function) - cost_component = IS.get_function_data(value_curve) - if all(iszero.((point -> point.y).(IS.get_points(cost_component)))) # TODO I think this should have been first. before? 
- @debug "All cost terms for component $(component_name) are 0.0" _group = - LOG_GROUP_COST_FUNCTIONS - return - end pwl_cost_expressions = _add_pwl_term!(container, component, cost_function, T(), U()) + isnothing(pwl_cost_expressions) && return for t in get_time_steps(container) add_cost_to_expression!( container, @@ -471,16 +210,9 @@ function add_variable_cost_to_objective!( } component_name = get_name(component) @debug "PWL Variable Cost" _group = LOG_GROUP_COST_FUNCTIONS component_name - # If array is full of tuples with zeros return 0.0 - value_curve = IS.get_value_curve(cost_function) - cost_component = IS.get_function_data(value_curve) - if all(iszero.((point -> point.y).(IS.get_points(cost_component)))) # TODO I think this should have been first. before? - @debug "All cost terms for component $(component_name) are 0.0" _group = - LOG_GROUP_COST_FUNCTIONS - return - end pwl_fuel_consumption_expressions = _add_pwl_term!(container, component, cost_function, T(), U()) + isnothing(pwl_fuel_consumption_expressions) && return # IS getter: simply returns the field of the FuelCurve struct is_time_variant_ = is_time_variant(IS.get_fuel_cost(cost_function)) diff --git a/src/quadratic_approximations/common.jl b/src/quadratic_approximations/common.jl new file mode 100644 index 0000000..e426035 --- /dev/null +++ b/src/quadratic_approximations/common.jl @@ -0,0 +1,43 @@ +""" +Convenience macro for quadratic approximations. + +Specify sparse by putting :sparse in the last two args +Specify custom meta by putting meta_-prefixed variable in the last two args +""" + +macro _add_container(type, key, args...) 
+ fname = Symbol("add_", type, "_container!") + + offset = 0 + sparse = :(false) + meta = :(meta) + if !isempty(args) + start = max(length(args) - 1, 1) + for arg in args[start:end] + if arg == :sparse + sparse = :(true) + offset += 1 + end + if occursin("meta_", string(arg)) + meta = arg + offset += 1 + end + end + end + + axes = args[1:(end - offset)] + return esc( + :( + $fname( + container, + $key(), + C, + names, + $(axes...), + time_steps; + meta = $meta, + sparse = $sparse, + ) + ), + ) +end diff --git a/src/quadratic_approximations/dnmdt.jl b/src/quadratic_approximations/dnmdt.jl new file mode 100644 index 0000000..390a6ee --- /dev/null +++ b/src/quadratic_approximations/dnmdt.jl @@ -0,0 +1,549 @@ +# D-NMDT and T-D-NMDT (Doubly Discretized Normalized Multiparametric Disaggregation Technique) +# MIP relaxations for bilinear products z = x*y and univariate quadratics z = x^2. +# A single `tighten` flag controls whether epigraph cuts are added (T-D-NMDT) or not (D-NMDT). +# Reference: Beach, Burlacu, Barmann, Hager, Hildebrand (2024), Definitions 8-10. + +# ── Variable types ──────────────────────────────────────────────────────────── +"Binary expansion variables beta[j] in D-NMDT base-2 discretization." +struct DNMDTBinaryVariable <: VariableType end +"Continuous residual Delta_w in D-NMDT base-2 discretization." +struct DNMDTResidualVariable <: VariableType end +"Scaled [0,1] variable w_hat = (w - w_min) / (w_max - w_min)." +struct DNMDTScaledVariable <: VariableType end +"Product auxiliary variables u[j], v[j] from binary x continuous McCormick." +struct DNMDTProductAuxVariable <: VariableType end +"Residual product variable Delta_z from McCormick on Delta_x * Delta_y." +struct DNMDTResidualProductVariable <: VariableType end + +# ── Constraint types ────────────────────────────────────────────────────────── +"Binary expansion constraint: w_hat = sum(2^-j * beta[j]) + Delta_w." 
+struct DNMDTBinaryExpansionConstraint <: ConstraintType end +"Scaling constraint: w_hat = (w - w_min) / (w_max - w_min)." +struct DNMDTScalingConstraint <: ConstraintType end +"McCormick constraints on beta[j] x continuous products." +struct DNMDTBinaryMcCormickConstraint <: ConstraintType end +"Back-transformation constraint: z = lx*ly*z_hat + offsets." +struct DNMDTBackTransformConstraint <: ConstraintType end +"Upper-bound McCormick on residual product (tightened univariate case)." +struct DNMDTResidualUpperBoundConstraint <: ConstraintType end +"McCormick bounds on x^2 (global convex/concave envelope)." +struct DNMDTSquareBoundConstraint <: ConstraintType end + +# ── Expression types ────────────────────────────────────────────────────────── +"Final bivariate result expression z ~ x*y." +struct DNMDTBilinearExpression <: ExpressionType end +"Final univariate result expression z ~ x^2." +struct DNMDTQuadraticExpression <: ExpressionType end + +# ── Helper: populate binary expansion ───────────────────────────────────────── + +""" + _populate_binary_expansion!(jump_model, C, names, time_steps, w_var_container, w_min, lw, depth, eps_L, wh_con, beta_con, dw_con, scaling_con, expansion_con) + +Populate pre-allocated containers with base-2 binary expansion variables and constraints. 
+ +For each (name, t), creates: +- Scaled variable w_hat in [0,1] +- L binary variables beta[j] +- Continuous residual Delta_w in [0, 2^{-L}] +- Scaling constraint: w_hat = (w - w_min) / lw +- Expansion constraint: w_hat = sum(2^{-j} * beta[j]) + Delta_w +""" +function _populate_binary_expansion!( + jump_model::JuMP.Model, + ::Type{C}, + names::Vector{String}, + time_steps::UnitRange{Int}, + w_var_container, + w_min::Float64, + lw::Float64, + depth::Int, + eps_L::Float64, + wh_con, + beta_con, + dw_con, + scaling_con, + expansion_con, +) where {C <: IS.InfrastructureSystemsComponent} + for name in names, t in time_steps + wh_con[name, t] = JuMP.@variable( + jump_model, + base_name = "DNMDTScaled_$(C)_{$(name), $(t)}", + lower_bound = 0.0, + upper_bound = 1.0, + ) + dw_con[name, t] = JuMP.@variable( + jump_model, + base_name = "DNMDTResidual_$(C)_{$(name), $(t)}", + lower_bound = 0.0, + upper_bound = eps_L, + ) + for j in 1:depth + beta_con[name, j, t] = JuMP.@variable( + jump_model, + base_name = "DNMDTBin_$(C)_{$(name), $(j), $(t)}", + binary = true, + ) + end + scaling_con[name, t] = JuMP.@constraint( + jump_model, + wh_con[name, t] == (w_var_container[name, t] - w_min) / lw, + ) + expansion_con[name, t] = JuMP.@constraint( + jump_model, + wh_con[name, t] == + sum(2.0^(-j) * beta_con[name, j, t] for j in 1:depth) + dw_con[name, t], + ) + end + return +end + +# ── Univariate D-NMDT / T-D-NMDT for z = x^2 ──────────────────────────────── + +""" + _add_dnmdt_univariate_approx!(container, C, names, time_steps, x_var_container, x_min, x_max, depth, meta; tighten, epigraph_depth) + +Approximate z = x^2 using univariate D-NMDT (Definition 9) or T-D-NMDT (Definition 10). + +When `tighten=false`: D-NMDT with full residual McCormick. +When `tighten=true`: T-D-NMDT replaces residual lower bounds with epigraph Q^{L1} cuts. + +Stores result in a `DNMDTQuadraticExpression` expression container. 
+""" +function _add_dnmdt_univariate_approx!( + container::OptimizationContainer, + ::Type{C}, + names::Vector{String}, + time_steps::UnitRange{Int}, + x_var_container, + x_min::Float64, + x_max::Float64, + depth::Int, + meta::String; + tighten::Bool = true, + epigraph_depth::Int = max(2, ceil(Int, 1.5 * depth)), +) where {C <: IS.InfrastructureSystemsComponent} + IS.@assert_op x_max > x_min + IS.@assert_op depth >= 1 + jump_model = get_jump_model(container) + lx = x_max - x_min + eps_L = 2.0^(-depth) + ws_hi = eps_L + 1.0 + + # Precompute power-of-two coefficients (invariant across names and time steps) + pow2_neg = [2.0^(-j) for j in 1:depth] + + # Valid bounds for z ≈ x² + z_ub = max(x_min^2, x_max^2) + z_lb = (x_min <= 0.0 <= x_max) ? 0.0 : min(x_min^2, x_max^2) + + # ── Allocate containers ────────────────────────────────────────────── + + # Binary expansion + xh_con = add_variable_container!( + container, DNMDTScaledVariable(), C, names, time_steps; meta, + ) + beta_con = add_variable_container!( + container, DNMDTBinaryVariable(), C, names, 1:depth, time_steps; meta, + ) + dx_con = add_variable_container!( + container, DNMDTResidualVariable(), C, names, time_steps; meta, + ) + scaling = add_constraints_container!( + container, DNMDTScalingConstraint(), C, names, time_steps; meta, + ) + expansion = add_constraints_container!( + container, DNMDTBinaryExpansionConstraint(), C, names, time_steps; meta, + ) + + # Product aux u[j] + u_con = add_variable_container!( + container, DNMDTProductAuxVariable(), C, names, 1:depth, time_steps; + meta = meta * "_u", + ) + bmc = add_constraints_container!( + container, DNMDTBinaryMcCormickConstraint(), C, + names, 1:3, 1:depth, time_steps; sparse = true, meta, + ) + + # Residual Delta_z + dz_con = add_variable_container!( + container, DNMDTResidualProductVariable(), C, names, time_steps; meta, + ) + + # Final z variable + back-transform + z_con = add_variable_container!( + container, BilinearProductVariable(), C, names, 
time_steps; meta, + ) + bt = add_constraints_container!( + container, DNMDTBackTransformConstraint(), C, names, time_steps; meta, + ) + + # McCormick on x^2 (global bounds) + sq = add_constraints_container!( + container, DNMDTSquareBoundConstraint(), C, + names, 1:3, time_steps; sparse = true, meta, + ) + + # Result expression + result_expr = add_expression_container!( + container, DNMDTQuadraticExpression(), C, names, time_steps; meta, + ) + + # Tightening: residual upper bound + if tighten + ub = add_constraints_container!( + container, DNMDTResidualUpperBoundConstraint(), C, + names, time_steps; meta, + ) + end + + # ── Populate: binary expansion ─────────────────────────────────────── + + _populate_binary_expansion!( + jump_model, C, names, time_steps, + x_var_container, x_min, lx, depth, eps_L, + xh_con, beta_con, dx_con, scaling, expansion, + ) + + # ── Populate: sum term, product aux, assembly, back-transform ──────── + + for name in names, t in time_steps + x = x_var_container[name, t] + + # Sum term: w_sum = Delta_x + x_hat (used locally in McCormick below) + w_sum = JuMP.AffExpr(0.0) + JuMP.add_to_expression!(w_sum, 1.0, dx_con[name, t]) + JuMP.add_to_expression!(w_sum, 1.0, xh_con[name, t]) + + # Binary McCormick for u[j] + for j in 1:depth + u_con[name, j, t] = JuMP.@variable( + jump_model, + base_name = "DNMDTu_$(C)_{$(name), $(j), $(t)}", + lower_bound = 0.0, + upper_bound = ws_hi, + ) + u_j = u_con[name, j, t] + beta_j = beta_con[name, j, t] + bmc[(name, 1, j, t)] = + JuMP.@constraint(jump_model, u_j <= ws_hi * beta_j) + bmc[(name, 2, j, t)] = + JuMP.@constraint(jump_model, u_j >= w_sum - ws_hi * (1 - beta_j)) + bmc[(name, 3, j, t)] = + JuMP.@constraint(jump_model, u_j <= w_sum) + end + + # Residual (bounded by product of residual ranges [0, eps_L]²) + dz_con[name, t] = JuMP.@variable( + jump_model, + base_name = "DNMDTdz_$(C)_{$(name), $(t)}", + lower_bound = 0.0, + upper_bound = eps_L * eps_L, + ) + if tighten + ub[name, t] = JuMP.@constraint( + 
jump_model, + dz_con[name, t] <= eps_L * dx_con[name, t], + ) + end + + # Scaled product z_hat (used locally in back-transform below) + zh = JuMP.AffExpr(0.0) + for j in 1:depth + JuMP.add_to_expression!(zh, pow2_neg[j], u_con[name, j, t]) + end + JuMP.add_to_expression!(zh, 1.0, dz_con[name, t]) + + # Back-transform: z = lx^2 * z_hat + 2 * x_min * lx * x_hat + x_min^2 + z_var = JuMP.@variable( + jump_model, + base_name = "DNMDTz_$(C)_{$(name), $(t)}", + lower_bound = z_lb, + upper_bound = z_ub, + ) + z_con[name, t] = z_var + bt[name, t] = JuMP.@constraint( + jump_model, + z_var == lx^2 * zh + 2 * x_min * lx * xh_con[name, t] + x_min^2, + ) + result_expr[name, t] = JuMP.AffExpr(0.0, z_var => 1.0) + + # McCormick on x^2 + sq[(name, 1, t)] = + JuMP.@constraint(jump_model, z_var >= 2 * x_min * x - x_min^2) + sq[(name, 2, t)] = + JuMP.@constraint(jump_model, z_var >= 2 * x_max * x - x_max^2) + sq[(name, 3, t)] = + JuMP.@constraint(jump_model, z_var <= (x_min + x_max) * x - x_min * x_max) + end + + # ── Residual McCormick (D-NMDT only) ── + if !tighten + _add_mccormick_envelope!( + container, C, names, time_steps, + dx_con, dx_con, dz_con, + 0.0, eps_L, 0.0, eps_L, meta * "_residual", + ) + + # ── Epigraph tightening (T-D-NMDT only) ── + else + _add_epigraph_quadratic_approx!( + container, C, names, time_steps, + x_var_container, x_min, x_max, epigraph_depth, meta * "_epi", + ) + epi_container = get_expression( + container, EpigraphExpression(), C, meta * "_epi", + ) + epi_lb = add_constraints_container!( + container, DNMDTSquareBoundConstraint(), C, + names, time_steps; meta = meta * "_epi_lb", + ) + for name in names, t in time_steps + epi_lb[name, t] = JuMP.@constraint( + jump_model, + z_con[name, t] >= epi_container[name, t], + ) + end + end + + return +end + +# ── Bivariate D-NMDT for z = x*y ───────────────────────────────────────────── + +""" + _add_dnmdt_bilinear_approx!(container, C, names, time_steps, x_var_container, y_var_container, x_min, x_max, y_min, 
y_max, depth, meta; add_mccormick) + +Approximate z = x*y using bivariate D-NMDT (Definition 8). + +Discretizes both x and y with base-2 binary expansion, applies McCormick envelopes +to all binary x continuous products, and handles the residual product via McCormick. +Uses lambda = DNMDT_LAMBDA (0.5) per Remark 1. + +Stores result in a `DNMDTBilinearExpression` expression container. +""" +function _add_dnmdt_bilinear_approx!( + container::OptimizationContainer, + ::Type{C}, + names::Vector{String}, + time_steps::UnitRange{Int}, + x_var_container, + y_var_container, + x_min::Float64, + x_max::Float64, + y_min::Float64, + y_max::Float64, + depth::Int, + meta::String; + add_mccormick::Bool = true, +) where {C <: IS.InfrastructureSystemsComponent} + IS.@assert_op x_max > x_min + IS.@assert_op y_max > y_min + IS.@assert_op depth >= 1 + jump_model = get_jump_model(container) + lx = x_max - x_min + ly = y_max - y_min + eps_L = 2.0^(-depth) + meta_x = meta * "_x" + meta_y = meta * "_y" + + # Precompute power-of-two coefficients + pow2_neg = [2.0^(-j) for j in 1:depth] + + # Valid bounds for z ≈ x·y + z_lo = min(x_min * y_min, x_min * y_max, x_max * y_min, x_max * y_max) + z_hi = max(x_min * y_min, x_min * y_max, x_max * y_min, x_max * y_max) + + # Blended-term bounds + lambda = DNMDT_LAMBDA + wu_lo = 0.0 + wu_hi = lambda * eps_L + (1 - lambda) + wv_lo = 0.0 + wv_hi = (1 - lambda) * eps_L + lambda + + # ── Allocate containers ────────────────────────────────────────────── + + # Binary expansion of x + xh_con = add_variable_container!( + container, DNMDTScaledVariable(), C, names, time_steps; meta = meta_x, + ) + beta_x_con = add_variable_container!( + container, DNMDTBinaryVariable(), C, names, 1:depth, time_steps; meta = meta_x, + ) + dx_con = add_variable_container!( + container, DNMDTResidualVariable(), C, names, time_steps; meta = meta_x, + ) + scaling_x = add_constraints_container!( + container, DNMDTScalingConstraint(), C, names, time_steps; meta = meta_x, + ) + 
expansion_x = add_constraints_container!( + container, DNMDTBinaryExpansionConstraint(), C, names, time_steps; meta = meta_x, + ) + + # Binary expansion of y + yh_con = add_variable_container!( + container, DNMDTScaledVariable(), C, names, time_steps; meta = meta_y, + ) + beta_y_con = add_variable_container!( + container, DNMDTBinaryVariable(), C, names, 1:depth, time_steps; meta = meta_y, + ) + dy_con = add_variable_container!( + container, DNMDTResidualVariable(), C, names, time_steps; meta = meta_y, + ) + scaling_y = add_constraints_container!( + container, DNMDTScalingConstraint(), C, names, time_steps; meta = meta_y, + ) + expansion_y = add_constraints_container!( + container, DNMDTBinaryExpansionConstraint(), C, names, time_steps; meta = meta_y, + ) + + # Product aux variables u[j], v[j] + u_con = add_variable_container!( + container, DNMDTProductAuxVariable(), C, names, 1:depth, time_steps; + meta = meta * "_u", + ) + v_con = add_variable_container!( + container, DNMDTProductAuxVariable(), C, names, 1:depth, time_steps; + meta = meta * "_v", + ) + + # Binary McCormick constraints + bmc = add_constraints_container!( + container, DNMDTBinaryMcCormickConstraint(), C, + names, 1:3, 1:depth, ["u", "v"], time_steps; sparse = true, meta, + ) + + # Residual product Delta_z + dz_con = add_variable_container!( + container, DNMDTResidualProductVariable(), C, names, time_steps; meta, + ) + + # Final z variable + back-transform constraint + z_con = add_variable_container!( + container, BilinearProductVariable(), C, names, time_steps; meta, + ) + bt = add_constraints_container!( + container, DNMDTBackTransformConstraint(), C, names, time_steps; meta, + ) + + # Result expression + result_expr = add_expression_container!( + container, DNMDTBilinearExpression(), C, names, time_steps; meta, + ) + + # ── Populate: binary expansions ────────────────────────────────────── + + _populate_binary_expansion!( + jump_model, C, names, time_steps, + x_var_container, x_min, lx, depth, 
eps_L, + xh_con, beta_x_con, dx_con, scaling_x, expansion_x, + ) + + _populate_binary_expansion!( + jump_model, C, names, time_steps, + y_var_container, y_min, ly, depth, eps_L, + yh_con, beta_y_con, dy_con, scaling_y, expansion_y, + ) + + # ── Populate: blended terms, product aux, assembly, back-transform ─── + + for name in names, t in time_steps + # Blended terms (used locally in McCormick below) + # w_u = lambda*Delta_y + (1-lambda)*y_hat + w_u = JuMP.AffExpr(0.0) + JuMP.add_to_expression!(w_u, lambda, dy_con[name, t]) + JuMP.add_to_expression!(w_u, 1 - lambda, yh_con[name, t]) + + # w_v = (1-lambda)*Delta_x + lambda*x_hat + w_v = JuMP.AffExpr(0.0) + JuMP.add_to_expression!(w_v, 1 - lambda, dx_con[name, t]) + JuMP.add_to_expression!(w_v, lambda, xh_con[name, t]) + + # Binary McCormick for u[j] and v[j] + for j in 1:depth + u_con[name, j, t] = JuMP.@variable( + jump_model, + base_name = "DNMDTu_$(C)_{$(name), $(j), $(t)}", + lower_bound = 0.0, + upper_bound = wu_hi, + ) + v_con[name, j, t] = JuMP.@variable( + jump_model, + base_name = "DNMDTv_$(C)_{$(name), $(j), $(t)}", + lower_bound = 0.0, + upper_bound = wv_hi, + ) + + u_j = u_con[name, j, t] + beta_xj = beta_x_con[name, j, t] + bmc[(name, 1, j, "u", t)] = + JuMP.@constraint(jump_model, u_j <= wu_hi * beta_xj) + bmc[(name, 2, j, "u", t)] = + JuMP.@constraint(jump_model, u_j >= w_u - wu_hi * (1 - beta_xj)) + bmc[(name, 3, j, "u", t)] = + JuMP.@constraint(jump_model, u_j <= w_u) + + v_j = v_con[name, j, t] + beta_yj = beta_y_con[name, j, t] + bmc[(name, 1, j, "v", t)] = + JuMP.@constraint(jump_model, v_j <= wv_hi * beta_yj) + bmc[(name, 2, j, "v", t)] = + JuMP.@constraint(jump_model, v_j >= w_v - wv_hi * (1 - beta_yj)) + bmc[(name, 3, j, "v", t)] = + JuMP.@constraint(jump_model, v_j <= w_v) + end + + # Residual product variable (bounded by [0, eps_L²]) + dz_con[name, t] = JuMP.@variable( + jump_model, + base_name = "DNMDTdz_$(C)_{$(name), $(t)}", + lower_bound = 0.0, + upper_bound = eps_L * eps_L, + ) + + # 
Scaled product z_hat (used locally in back-transform below) + zh = JuMP.AffExpr(0.0) + for j in 1:depth + JuMP.add_to_expression!(zh, pow2_neg[j], u_con[name, j, t]) + JuMP.add_to_expression!(zh, pow2_neg[j], v_con[name, j, t]) + end + JuMP.add_to_expression!(zh, 1.0, dz_con[name, t]) + + # Back-transform: z = lx*ly*z_hat + x_min*ly*y_hat + y_min*lx*x_hat + x_min*y_min + z_var = JuMP.@variable( + jump_model, + base_name = "DNMDTz_$(C)_{$(name), $(t)}", + lower_bound = z_lo, + upper_bound = z_hi, + ) + z_con[name, t] = z_var + bt[name, t] = JuMP.@constraint( + jump_model, + z_var == + lx * ly * zh + + x_min * ly * yh_con[name, t] + + y_min * lx * xh_con[name, t] + + x_min * y_min, + ) + result_expr[name, t] = JuMP.AffExpr(0.0, z_var => 1.0) + end + + # ── Residual McCormick (Delta_z ~ Delta_x * Delta_y) ── + _add_mccormick_envelope!( + container, C, names, time_steps, + dx_con, dy_con, dz_con, + 0.0, eps_L, 0.0, eps_L, meta * "_residual", + ) + + # ── Global McCormick on z = x*y ── + if add_mccormick + _add_mccormick_envelope!( + container, C, names, time_steps, + x_var_container, y_var_container, z_con, + x_min, x_max, y_min, y_max, meta, + ) + end + + return +end diff --git a/src/quadratic_approximations/epigraph.jl b/src/quadratic_approximations/epigraph.jl index 590e78c..c9e05f1 100644 --- a/src/quadratic_approximations/epigraph.jl +++ b/src/quadratic_approximations/epigraph.jl @@ -12,7 +12,7 @@ struct EpigraphVariable <: VariableType end struct EpigraphTangentConstraint <: ConstraintType end """ - _add_epigraph_quadratic_approx!(container, C, names, time_steps, x_var_container, x_min, x_max, depth, meta) + _add_epigraph_quadratic_approx!(container, C, names, time_steps, x_var, x_min, x_max, depth, meta) Create a variable z that lower-bounds x² using tangent-line cuts (Q^{L1} relaxation). 
@@ -30,7 +30,7 @@ The maximum underestimation gap between the tangent envelope and x² is - `::Type{C}`: component type - `names::Vector{String}`: component names - `time_steps::UnitRange{Int}`: time periods -- `x_var_container`: container of variables indexed by (name, t) +- `x_var`: container of variables indexed by (name, t) - `x_min::Float64`: lower bound of x domain - `x_max::Float64`: upper bound of x domain - `depth::Int`: epigraph depth L1 (uses 2^depth + 1 tangent breakpoints) @@ -41,7 +41,7 @@ function _add_epigraph_quadratic_approx!( ::Type{C}, names::Vector{String}, time_steps::UnitRange{Int}, - x_var_container, + x_var, x_min::Float64, x_max::Float64, depth::Int, @@ -51,123 +51,77 @@ function _add_epigraph_quadratic_approx!( IS.@assert_op depth >= 1 jump_model = get_jump_model(container) delta = x_max - x_min + g_levels = 0:depth - # Create z variable container - z_container = add_variable_container!( - container, - EpigraphVariable(), - C, - names, - time_steps; - meta, - ) + z_var = @_add_container(variable, EpigraphVariable) + g_var = @_add_container(variable, SawtoothAuxVariable, g_levels) + lp_cons = @_add_container(constraints, SawtoothLPConstraint, 1:2) + link_cons = @_add_container(constraints, SawtoothLinkingConstraint) + tangent_cons = + @_add_container(constraints, EpigraphTangentConstraint, 1:(depth + 2), sparse) + result_expr = @_add_container(expression, EpigraphExpression) - g_levels = 0:depth - g_container = add_variable_container!( - container, - SawtoothAuxVariable(), - C, - names, - g_levels, - time_steps; - meta, - ) - lp_container = add_constraints_container!( - container, - SawtoothLPConstraint(), - C, - names, - 1:2, - time_steps; - meta, - ) - link_container = add_constraints_container!( - container, - SawtoothLinkingConstraint(), - C, - names, - time_steps; - meta, - ) - - # Create tangent constraint container (sparse: name × breakpoint_idx × t) - tangent_container = add_constraints_container!( - container, - 
EpigraphTangentConstraint(), - C, - names, - 1:(depth + 2), - time_steps; - meta, - sparse = true, - ) - - expr_container = add_expression_container!( - container, - EpigraphExpression(), - C, - names, - time_steps; - meta, - ) + # Upper bound for epigraph variable z ≈ x² + z_ub = max(x_min^2, x_max^2) for name in names, t in time_steps - x_var = x_var_container[name, t] + x = x_var[name, t] # Auxiliary variables g_0,...,g_L ∈ [0, 1] for j in g_levels - g_container[name, j, t] = JuMP.@variable( + g_var[name, j, t] = JuMP.@variable( jump_model, base_name = "SawtoothAux_$(C)_{$(name), $(j), $(t)}", lower_bound = 0.0, upper_bound = 1.0, ) end - g0 = g_container[name, 0, t] + g0 = g_var[name, 0, t] # Linking constraint: g_0 = (x - x_min) / Δ - link_container[name, t] = JuMP.@constraint( + link_cons[name, t] = JuMP.@constraint( jump_model, - g0 == (x_var - x_min) / delta, + g0 == (x - x_min) / delta, ) # T^L constraints for j = 1,...,L for j in 1:depth - g_prev = g_container[name, j - 1, t] - g_curr = g_container[name, j, t] + g_prev = g_var[name, j - 1, t] + g_curr = g_var[name, j, t] # g_j ≤ 2 g_{j-1} - lp_container[name, 1, t] = JuMP.@constraint(jump_model, g_curr <= 2.0 * g_prev) + lp_cons[name, 1, t] = JuMP.@constraint(jump_model, g_curr <= 2.0 * g_prev) # g_j ≤ 2(1 - g_{j-1}) - lp_container[name, 2, t] = + lp_cons[name, 2, t] = JuMP.@constraint(jump_model, g_curr <= 2.0 * (1.0 - g_prev)) end # Create the epigraph variable (bounded from below by tangent cuts) - z_var = JuMP.@variable( - jump_model, - base_name = "EpigraphVar_$(C)_{$(name), $(t)}", - lower_bound = 0.0, - ) - z_container[name, t] = z_var + z = + z_var[name, t] = JuMP.@variable( + jump_model, + base_name = "EpigraphVar_$(C)_{$(name), $(t)}", + lower_bound = 0.0, + upper_bound = z_ub, + ) fL = JuMP.AffExpr(0.0) for j in 1:depth - JuMP.add_to_expression!(fL, delta * delta * 2.0^(-2j), g_container[name, j, t]) - tangent_container[(name, j + 1, t)] = JuMP.@constraint( + JuMP.add_to_expression!(fL, delta * 
delta * 2.0^(-2j), g_var[name, j, t]) + tangent_cons[(name, j + 1, t)] = JuMP.@constraint( jump_model, - z_var >= + z >= x_min * (2 * delta * g0 + x_min) - fL + delta^2 * (g0 - 2.0^(-2j - 2)) ) end - tangent_container[name, 1, t] = JuMP.@constraint(jump_model, z_var >= 0) - tangent_container[name, depth + 1, t] = JuMP.@constraint( + tangent_cons[name, 1, t] = JuMP.@constraint(jump_model, z >= 0) + tangent_cons[name, depth + 1, t] = JuMP.@constraint( jump_model, - z_var >= 2.0 * x_min - 1.0 + 2.0 * delta * g_container[name, 0, t] + z >= 2.0 * x_min - 1.0 + 2.0 * delta * g0 ) - expr_container[name, t] = JuMP.AffExpr(0.0, z_var => 1.0) + result_expr[name, t] = JuMP.AffExpr(0.0, z => 1.0) end - return + return result_expr end diff --git a/src/common_models/add_pwl_methods.jl b/src/quadratic_approximations/incremental.jl similarity index 81% rename from src/common_models/add_pwl_methods.jl rename to src/quadratic_approximations/incremental.jl index f8fc4d5..a2b8782 100644 --- a/src/common_models/add_pwl_methods.jl +++ b/src/quadratic_approximations/incremental.jl @@ -1,58 +1,11 @@ -""" - _get_breakpoints_for_pwl_function(min_val, max_val, f; num_segments = DEFAULT_INTERPOLATION_LENGTH) - -Generate breakpoints for piecewise linear (PWL) approximation of a nonlinear function. - -This function creates equally-spaced breakpoints over the specified domain [min_val, max_val] -and evaluates the given function at each breakpoint to construct a piecewise linear approximation. -The breakpoints are used in optimization problems to linearize nonlinear constraints or objectives. 
- -# Arguments -- `min_val::Float64`: Minimum value of the domain for the PWL approximation -- `max_val::Float64`: Maximum value of the domain for the PWL approximation -- `f`: Function to be approximated (must be callable with Float64 input) -- `num_segments::Int`: Number of linear segments in the PWL approximation (default: DEFAULT_INTERPOLATION_LENGTH) - -# Returns -- `Tuple{Vector{Float64}, Vector{Float64}}`: A tuple containing: - - `x_bkpts`: Vector of x-coordinates (breakpoints) in the domain - - `y_bkpts`: Vector of y-coordinates (function values at breakpoints) - -# Notes -- The number of breakpoints is `num_segments + 1` -- Breakpoints are equally spaced across the domain -- The first breakpoint is always at `min_val` and the last at `max_val` -""" -function _get_breakpoints_for_pwl_function( - min_val::Float64, - max_val::Float64, - f; - num_segments = DEFAULT_INTERPOLATION_LENGTH, -) - # Calculate total number of breakpoints (one more than segments) - # num_segments is the number of linear segments in the PWL approximation - # num_bkpts is the total number of breakpoints needed for the segments - num_bkpts = num_segments + 1 - - # Calculate step size for equally-spaced breakpoints - step = (max_val - min_val) / num_segments - - # Pre-allocate vectors for breakpoint coordinates - x_bkpts = Vector{Float64}(undef, num_bkpts) # Domain values (x-coordinates) - y_bkpts = Vector{Float64}(undef, num_bkpts) # Function values (y-coordinates) - - # Set the first breakpoint at the minimum domain value - x_bkpts[1] = min_val - y_bkpts[1] = f(min_val) - - # Generate remaining breakpoints by stepping through the domain - for i in 1:num_segments - x_val = min_val + step * i # Calculate x-coordinate of current breakpoint - x_bkpts[i + 1] = x_val - y_bkpts[i + 1] = f(x_val) # Evaluate function at current breakpoint - end - return x_bkpts, y_bkpts -end +# Incremental piecewise linear (PWL) formulation. 
+# +# This implements the incremental method for PWL approximation using δ (interpolation) +# and z (binary ordering) variables. Retained for downstream compatibility (POM HVDC models). +# +# The same mathematical problem (PWL approximation of nonlinear functions) is also solved by +# `_add_sos2_quadratic_approx!` (convex combination + SOS2) and +# `_add_sawtooth_quadratic_approx!` (sawtooth relaxation), which use different formulations. """ add_sparse_pwl_interpolation_variables!(container, devices, ::T, model, num_segments = DEFAULT_INTERPOLATION_LENGTH) @@ -137,7 +90,7 @@ function add_sparse_pwl_interpolation_variables!( ub = get_variable_upper_bound(T(), d, V()) ub !== nothing && JuMP.set_upper_bound(var_container[name, i, t], ub) - # Set lower bound if specified by the device formulation + # Set lower bound if specified by the device formulation lb = get_variable_lower_bound(T(), d, V()) lb !== nothing && JuMP.set_lower_bound(var_container[name, i, t], lb) end @@ -164,7 +117,7 @@ Binary variables z ensure the incremental property: δᵢ₊₁ ≤ zᵢ ≤ δ # Arguments - `container::OptimizationContainer`: The optimization container to add constraints to - `::R`: Type parameter for the original variable (x) -- `::S`: Type parameter for the approximated variable (y = f(x)) +- `::S`: Type parameter for the approximated variable (y = f(x)) - `::T`: Type parameter for the interpolation variables (δ) - `::U`: Type parameter for the binary interpolation variables (z) - `::V`: Type parameter for the constraint type @@ -175,7 +128,7 @@ Binary variables z ensure the incremental property: δᵢ₊₁ ≤ zᵢ ≤ δ # Type Parameters - `R <: VariableType`: Original variable type -- `S <: VariableType`: Approximated variable type +- `S <: VariableType`: Approximated variable type - `T <: VariableType`: Interpolation variable type - `U <: VariableType`: Binary interpolation variable type - `V <: ConstraintType`: Constraint type diff --git a/src/quadratic_approximations/manual_sos2.jl 
b/src/quadratic_approximations/manual_sos2.jl index e2aeec2..794ab3b 100644 --- a/src/quadratic_approximations/manual_sos2.jl +++ b/src/quadratic_approximations/manual_sos2.jl @@ -9,7 +9,7 @@ struct ManualSOS2SegmentSelectionConstraint <: ConstraintType end struct ManualSOS2AdjacencyConstraint <: ConstraintType end """ - _add_manual_sos2_quadratic_approx!(container, C, names, time_steps, x_var_container, x_min, x_max, num_segments, meta) + _add_manual_sos2_quadratic_approx!(container, C, names, time_steps, x_var, x_min, x_max, num_segments, meta) Approximate x² using a piecewise linear function with manually-implemented SOS2 constraints. @@ -23,7 +23,7 @@ expression container. - `::Type{C}`: component type - `names::Vector{String}`: component names - `time_steps::UnitRange{Int}`: time periods -- `x_var_container`: container of variables indexed by (name, t) +- `x_var`: container of variables indexed by (name, t) - `x_min::Float64`: lower bound of x domain - `x_max::Float64`: upper bound of x domain - `num_segments::Int`: number of PWL segments @@ -34,7 +34,7 @@ function _add_manual_sos2_quadratic_approx!( ::Type{C}, names::Vector{String}, time_steps::UnitRange{Int}, - x_var_container, + x_var, x_min::Float64, x_max::Float64, num_segments::Int, @@ -50,51 +50,14 @@ function _add_manual_sos2_quadratic_approx!( lambda_container = add_variable_container!(container, QuadraticApproxVariable(), C; meta) z_container = add_variable_container!(container, ManualSOS2BinaryVariable(), C; meta) - link_container = add_constraints_container!( - container, - QuadraticApproxLinkingConstraint(), - C, - names, - time_steps; - meta, - ) - norm_container = add_constraints_container!( - container, - QuadraticApproxNormalizationConstraint(), - C, - names, - time_steps; - meta, - ) - seg_container = add_constraints_container!( - container, - ManualSOS2SegmentSelectionConstraint(), - C, - names, - time_steps; - meta, - ) - adj_container = add_constraints_container!( - container, - 
ManualSOS2AdjacencyConstraint(), - C, - names, - 1:n_points, - time_steps; - meta, - ) - - expr_container = add_expression_container!( - container, - QuadraticApproximationExpression(), - C, - names, - time_steps; - meta, - ) + link_cons = @_add_container(constraints, QuadraticApproxLinkingConstraint) + norm_cons = @_add_container(constraints, QuadraticApproxNormalizationConstraint) + seg_cons = @_add_container(constraints, ManualSOS2SegmentSelectionConstraint) + adj_cons = @_add_container(constraints, ManualSOS2AdjacencyConstraint, 1:n_points) + result_expr = @_add_container(expression, QuadraticApproximationExpression) for name in names, t in time_steps - x_var = x_var_container[name, t] + x = x_var[name, t] # Create lambda variables: λ_i ∈ [0, 1] lambda = Vector{JuMP.VariableRef}(undef, n_points) @@ -109,13 +72,13 @@ function _add_manual_sos2_quadratic_approx!( end # x = Σ λ_i * x_i - link_container[name, t] = JuMP.@constraint( + link_cons[name, t] = JuMP.@constraint( jump_model, - x_var == sum(lambda[i] * x_bkpts[i] for i in eachindex(x_bkpts)) + x == sum(lambda[i] * x_bkpts[i] for i in eachindex(x_bkpts)) ) # Σ λ_i = 1 - norm_container[name, t] = JuMP.@constraint(jump_model, sum(lambda) == 1.0) + norm_cons[name, t] = JuMP.@constraint(jump_model, sum(lambda) == 1.0) # Create binary segment-selection variables z_j z_vars = Vector{JuMP.VariableRef}(undef, n_bins) @@ -129,18 +92,18 @@ function _add_manual_sos2_quadratic_approx!( end # Σ z_j = 1 (segment selection) - seg_container[name, t] = JuMP.@constraint(jump_model, sum(z_vars) == 1) + seg_cons[name, t] = JuMP.@constraint(jump_model, sum(z_vars) == 1) # Adjacency constraints: λ_i ≤ z_{i-1} + z_i (with boundary cases) # λ_1 ≤ z_1 - adj_container[name, 1, t] = JuMP.@constraint(jump_model, lambda[1] <= z_vars[1]) + adj_cons[name, 1, t] = JuMP.@constraint(jump_model, lambda[1] <= z_vars[1]) # λ_i ≤ z_{i-1} + z_i for i = 2..n-1 for i in 2:(n_points - 1) - adj_container[name, i + 1, t] = + adj_cons[name, i + 1, t] = 
JuMP.@constraint(jump_model, lambda[i] <= z_vars[i - 1] + z_vars[i]) end # λ_n ≤ z_{n-1} - adj_container[name, n_points, t] = + adj_cons[name, n_points, t] = JuMP.@constraint(jump_model, lambda[n_points] <= z_vars[n_bins]) # Build x̂² = Σ λ_i * x_i² as an affine expression @@ -148,8 +111,8 @@ function _add_manual_sos2_quadratic_approx!( for i in 1:n_points JuMP.add_to_expression!(x_hat_sq, x_sq_bkpts[i], lambda[i]) end - expr_container[name, t] = x_hat_sq + result_expr[name, t] = x_hat_sq end - return + return result_expr end diff --git a/src/quadratic_approximations/pwl_utils.jl b/src/quadratic_approximations/pwl_utils.jl new file mode 100644 index 0000000..46aed97 --- /dev/null +++ b/src/quadratic_approximations/pwl_utils.jl @@ -0,0 +1,59 @@ +# Shared utilities for piecewise linear approximation methods. + +""" + _get_breakpoints_for_pwl_function(min_val, max_val, f; num_segments = DEFAULT_INTERPOLATION_LENGTH) + +Generate breakpoints for piecewise linear (PWL) approximation of a nonlinear function. + +This function creates equally-spaced breakpoints over the specified domain [min_val, max_val] +and evaluates the given function at each breakpoint to construct a piecewise linear approximation. +The breakpoints are used in optimization problems to linearize nonlinear constraints or objectives. 
+ +# Arguments +- `min_val::Float64`: Minimum value of the domain for the PWL approximation +- `max_val::Float64`: Maximum value of the domain for the PWL approximation +- `f`: Function to be approximated (must be callable with Float64 input) +- `num_segments::Int`: Number of linear segments in the PWL approximation (default: DEFAULT_INTERPOLATION_LENGTH) + +# Returns +- `Tuple{Vector{Float64}, Vector{Float64}}`: A tuple containing: + - `x_bkpts`: Vector of x-coordinates (breakpoints) in the domain + - `y_bkpts`: Vector of y-coordinates (function values at breakpoints) + +# Notes +- The number of breakpoints is `num_segments + 1` +- Breakpoints are equally spaced across the domain +- The first breakpoint is always at `min_val` and the last at `max_val` +""" +function _get_breakpoints_for_pwl_function( + min_val::Float64, + max_val::Float64, + f; + num_segments = DEFAULT_INTERPOLATION_LENGTH, +) + # Calculate total number of breakpoints (one more than segments) + # num_segments is the number of linear segments in the PWL approximation + # num_bkpts is the total number of breakpoints needed for the segments + num_bkpts = num_segments + 1 + + # Calculate step size for equally-spaced breakpoints + step = (max_val - min_val) / num_segments + + # Pre-allocate vectors for breakpoint coordinates + x_bkpts = Vector{Float64}(undef, num_bkpts) # Domain values (x-coordinates) + y_bkpts = Vector{Float64}(undef, num_bkpts) # Function values (y-coordinates) + + # Set the first breakpoint at the minimum domain value + x_bkpts[1] = min_val + y_bkpts[1] = f(min_val) + + # Generate remaining breakpoints by stepping through the domain + for i in 1:num_segments + x_val = min_val + step * i # Calculate x-coordinate of current breakpoint + x_bkpts[i + 1] = x_val + y_bkpts[i + 1] = f(x_val) # Evaluate function at current breakpoint + end + return x_bkpts, y_bkpts +end + +_square(x::Float64) = x * x diff --git a/src/quadratic_approximations/sawtooth.jl 
b/src/quadratic_approximations/sawtooth.jl index c0661e3..59b6645 100644 --- a/src/quadratic_approximations/sawtooth.jl +++ b/src/quadratic_approximations/sawtooth.jl @@ -13,7 +13,7 @@ struct SawtoothMIPConstraint <: ConstraintType end struct SawtoothLPConstraint <: ConstraintType end """ - _add_sawtooth_quadratic_approx!(container, C, names, time_steps, x_var_container, x_min, x_max, depth, meta) + _add_sawtooth_quadratic_approx!(container, C, names, time_steps, x_var, x_min, x_max, depth, meta) Approximate x² using the sawtooth MIP formulation. @@ -30,7 +30,7 @@ with maximum overestimation error Δ² · 2^{-2L-2} where Δ = x_max - x_min. - `::Type{C}`: component type - `names::Vector{String}`: component names - `time_steps::UnitRange{Int}`: time periods -- `x_var_container`: container of variables indexed by (name, t) +- `x_var`: container of variables indexed by (name, t) - `x_min::Float64`: lower bound of x domain - `x_max::Float64`: upper bound of x domain - `depth::Int`: sawtooth depth L (number of binary variables per component per time step) @@ -41,7 +41,7 @@ function _add_sawtooth_quadratic_approx!( ::Type{C}, names::Vector{String}, time_steps::UnitRange{Int}, - x_var_container, + x_var, x_min::Float64, x_max::Float64, depth::Int, @@ -55,58 +55,21 @@ function _add_sawtooth_quadratic_approx!( # Create containers with known dimensions g_levels = 0:depth alpha_levels = 1:depth - g_container = add_variable_container!( - container, - SawtoothAuxVariable(), - C, - names, - g_levels, - time_steps; - meta, - ) - alpha_container = add_variable_container!( - container, - SawtoothBinaryVariable(), - C, - names, - alpha_levels, - time_steps; - meta, - ) - mip_container = add_constraints_container!( - container, - SawtoothMIPConstraint(), - C, - names, - 1:4, - time_steps; - meta, - sparse = true, - ) - link_container = add_constraints_container!( - container, - SawtoothLinkingConstraint(), - C, - names, - time_steps; - meta, - ) - - expr_container = 
add_expression_container!( - container, - QuadraticApproximationExpression(), - C, - names, - time_steps; - meta, - ) + g_var = @_add_container(variable, SawtoothAuxVariable, g_levels) + alpha_var = @_add_container(variable, SawtoothBinaryVariable, alpha_levels) + mip_cons = @_add_container(constraints, SawtoothMIPConstraint, 1:4, sparse) + link_cons = @_add_container(constraints, SawtoothLinkingConstraint) + result_expr = @_add_container(expression, QuadraticApproximationExpression) + + # Precompute sawtooth coefficients (invariant across names and time steps) + saw_coeffs = [delta * delta * (2.0^(-2 * j)) for j in alpha_levels] for name in names, t in time_steps - x_var = x_var_container[name, t] + x = x_var[name, t] # Auxiliary variables g_0,...,g_L ∈ [0, 1] for j in g_levels - g_container[name, j, t] = JuMP.@variable( + g_var[name, j, t] = JuMP.@variable( jump_model, base_name = "SawtoothAux_$(C)_{$(name), $(j), $(t)}", lower_bound = 0.0, @@ -116,7 +79,7 @@ function _add_sawtooth_quadratic_approx!( # Binary variables α_1,...,α_L for j in alpha_levels - alpha_container[name, j, t] = JuMP.@variable( + alpha_var[name, j, t] = JuMP.@variable( jump_model, base_name = "SawtoothBin_$(C)_{$(name), $(j), $(t)}", binary = true, @@ -124,27 +87,27 @@ function _add_sawtooth_quadratic_approx!( end # Linking constraint: g_0 = (x - x_min) / Δ - link_container[name, t] = JuMP.@constraint( + link_cons[name, t] = JuMP.@constraint( jump_model, - g_container[name, 0, t] == (x_var - x_min) / delta, + g_var[name, 0, t] == (x - x_min) / delta, ) # S^L constraints for j = 1,...,L for j in alpha_levels - g_prev = g_container[name, j - 1, t] - g_curr = g_container[name, j, t] - alpha_j = alpha_container[name, j, t] + g_prev = g_var[name, j - 1, t] + g_curr = g_var[name, j, t] + alpha_j = alpha_var[name, j, t] # g_j ≤ 2 g_{j-1} - mip_container[name, 1, t] = JuMP.@constraint(jump_model, g_curr <= 2.0 * g_prev) + mip_cons[name, 1, t] = JuMP.@constraint(jump_model, g_curr <= 2.0 * g_prev) # 
g_j ≤ 2(1 - g_{j-1}) - mip_container[name, 2, t] = + mip_cons[name, 2, t] = JuMP.@constraint(jump_model, g_curr <= 2.0 * (1.0 - g_prev)) # g_j ≥ 2(g_{j-1} - α_j) - mip_container[name, 3, t] = + mip_cons[name, 3, t] = JuMP.@constraint(jump_model, g_curr >= 2.0 * (g_prev - alpha_j)) # g_j ≥ 2(α_j - g_{j-1}) - mip_container[name, 4, t] = + mip_cons[name, 4, t] = JuMP.@constraint(jump_model, g_curr >= 2.0 * (alpha_j - g_prev)) end @@ -153,15 +116,14 @@ function _add_sawtooth_quadratic_approx!( JuMP.add_to_expression!( x_sq_approx, 2.0 * x_min * delta + delta * delta, - g_container[name, 0, t], + g_var[name, 0, t], ) for j in alpha_levels - coeff = delta * delta * (2.0^(-2 * j)) - JuMP.add_to_expression!(x_sq_approx, -coeff, g_container[name, j, t]) + JuMP.add_to_expression!(x_sq_approx, -saw_coeffs[j], g_var[name, j, t]) end - expr_container[name, t] = x_sq_approx + result_expr[name, t] = x_sq_approx end - return + return result_expr end diff --git a/src/quadratic_approximations/solver_sos2.jl b/src/quadratic_approximations/solver_sos2.jl index 2b603e1..0b1f3ec 100644 --- a/src/quadratic_approximations/solver_sos2.jl +++ b/src/quadratic_approximations/solver_sos2.jl @@ -4,21 +4,23 @@ "Expression container for quadratic (x²) approximation results." struct QuadraticApproximationExpression <: ExpressionType end -"Lambda (λ) convex combination weight variables for SOS2 quadratic approximation." +"lambda_var (λ) convex combination weight variables for SOS2 quadratic approximation." struct QuadraticApproxVariable <: SparseVariableType end "Links x to the weighted sum of breakpoints in SOS2 quadratic approximation." struct QuadraticApproxLinkingConstraint <: ConstraintType end "Ensures the sum of λ weights equals 1 in SOS2 quadratic approximation." 
 struct QuadraticApproxNormalizationConstraint <: ConstraintType end
 
+struct QuadraticApproxLinkingExpression <: ExpressionType end
+
 struct SolverSOS2Constraint <: ConstraintType end
 
 """
-    _add_sos2_quadratic_approx!(container, C, names, time_steps, x_var_container, x_min, x_max, num_segments, meta)
+    _add_sos2_quadratic_approx!(container, C, names, time_steps, x_var, x_min, x_max, num_segments, meta)
 
 Approximate x² using a piecewise linear function with solver-native SOS2 constraints.
 
-Creates lambda (λ) variables representing convex combination weights over breakpoints,
+Creates lambda (λ) variables representing convex combination weights over breakpoints,
 adds linking, normalization, and MOI.SOS2 constraints, and stores affine expressions
 approximating x² in a `QuadraticApproximationExpression` expression container.
 
@@ -27,7 +29,7 @@ approximating x² in a `QuadraticApproximationExpression` expression container.
 - `::Type{C}`: component type
 - `names::Vector{String}`: component names
 - `time_steps::UnitRange{Int}`: time periods
-- `x_var_container`: container of variables indexed by (name, t)
+- `x_var`: container of variables indexed by (name, t)
 - `x_min::Float64`: lower bound of x domain
 - `x_max::Float64`: upper bound of x domain
 - `num_segments::Int`: number of PWL segments
@@ -38,7 +40,7 @@ function _add_sos2_quadratic_approx!(
     ::Type{C},
     names::Vector{String},
     time_steps::UnitRange{Int},
-    x_var_container,
+    x_var,
     x_min::Float64,
     x_max::Float64,
     num_segments::Int,
@@ -50,50 +52,22 @@ function _add_sos2_quadratic_approx!(
     jump_model = get_jump_model(container)
 
     # Create all containers upfront
-    lambda_container =
+    lambda_var =  # sparse (SparseVariableType): keys (name, i, t) supplied at insertion, so no axes are passed
add_variable_container!(container, QuadraticApproxVariable(), C; meta) - link_container = add_constraints_container!( - container, - QuadraticApproxLinkingConstraint(), - C, - names, - time_steps; - meta, - ) - norm_container = add_constraints_container!( - container, - QuadraticApproxNormalizationConstraint(), - C, - names, - time_steps; - meta, - ) - sos_container = add_constraints_container!( - container, - SolverSOS2Constraint(), - C, - names, - time_steps; - meta, - ) - - expr_container = add_expression_container!( - container, - QuadraticApproximationExpression(), - C, - names, - time_steps; - meta, - ) + link_expr = @_add_container(expression, QuadraticApproxLinkingExpression) + link_cons = @_add_container(constraints, QuadraticApproxLinkingConstraint) + norm_cons = @_add_container(constraints, QuadraticApproxNormalizationConstraint) + sos_cons = @_add_container(constraints, SolverSOS2Constraint) + result_expr = @_add_container(expression, QuadraticApproximationExpression) for name in names, t in time_steps - x_var = x_var_container[name, t] + x = x_var[name, t] - # Create lambda variables: λ_i ∈ [0, 1] + # Create lambda_var variables: λ_i ∈ [0, 1] lambda = Vector{JuMP.VariableRef}(undef, n_points) for i in 1:n_points lambda[i] = - lambda_container[(name, i, t)] = JuMP.@variable( + lambda_var[(name, i, t)] = JuMP.@variable( jump_model, base_name = "QuadraticApproxVariable_$(C)_{$(name), pwl_$(i), $(t)}", lower_bound = 0.0, @@ -102,16 +76,17 @@ function _add_sos2_quadratic_approx!( end # x = Σ λ_i * x_i - link_container[name, t] = JuMP.@constraint( - jump_model, - x_var == sum(lambda[i] * x_bkpts[i] for i in eachindex(x_bkpts)) - ) + link = link_expr[name, t] = JuMP.AffExpr(0.0) + for i in eachindex(x_bkpts) + JuMP.add_to_expression!(link, lambda[i], x_bkpts[i]) + end + link_cons[name, t] = JuMP.@constraint(jump_model, x == link) # Σ λ_i = 1 - norm_container[name, t] = JuMP.@constraint(jump_model, sum(lambda) == 1.0) + norm_cons[name, t] = 
JuMP.@constraint(jump_model, sum(lambda) == 1.0) # λ ∈ SOS2 (solver-native) - sos_container[name, t] = + sos_cons[name, t] = JuMP.@constraint(jump_model, lambda in MOI.SOS2(collect(1:n_points))) # Build x̂² = Σ λ_i * x_i² as an affine expression @@ -119,10 +94,8 @@ function _add_sos2_quadratic_approx!( for i in 1:n_points JuMP.add_to_expression!(x_hat_sq, x_sq_bkpts[i], lambda[i]) end - expr_container[name, t] = x_hat_sq + result_expr[name, t] = x_hat_sq end - return + return result_expr end - -_square(x::Float64) = x * x diff --git a/test/InfrastructureOptimizationModelsTests.jl b/test/InfrastructureOptimizationModelsTests.jl index 7c8a4a1..ff37e23 100644 --- a/test/InfrastructureOptimizationModelsTests.jl +++ b/test/InfrastructureOptimizationModelsTests.jl @@ -37,6 +37,7 @@ include(joinpath(TEST_DIR, "mocks/mock_container.jl")) include(joinpath(TEST_DIR, "mocks/constructors.jl")) include(joinpath(TEST_DIR, "test_utils/test_types.jl")) include(joinpath(TEST_DIR, "test_utils/objective_function_helpers.jl")) +include(joinpath(TEST_DIR, "test_utils/qa_bilinear_helpers.jl")) # Environment flags for test selection const RUN_UNIT_TESTS = get(ENV, "IOM_RUN_UNIT_TESTS", "true") == "true" @@ -144,6 +145,7 @@ function run_tests() include(joinpath(TEST_DIR, "test_quadratic_approximations.jl")) include(joinpath(TEST_DIR, "test_bilinear_approximations.jl")) include(joinpath(TEST_DIR, "test_hybs_approximations.jl")) + include(joinpath(TEST_DIR, "test_dnmdt_approximations.jl")) end #= diff --git a/test/test_bilinear_approximations.jl b/test/test_bilinear_approximations.jl index b983596..7cb681b 100644 --- a/test/test_bilinear_approximations.jl +++ b/test/test_bilinear_approximations.jl @@ -1,31 +1,5 @@ const BILINEAR_META = "BilinearTest" -function _setup_bilinear_test(device_names::Vector{String}, time_steps::UnitRange{Int}) - container = _setup_qa_container(time_steps) - x_var_container = IOM.add_variable_container!( - container, - TestOriginalVariable(), - MockThermalGen, - 
device_names, - time_steps, - ) - y_var_container = IOM.add_variable_container!( - container, - TestApproximatedVariable(), - MockThermalGen, - device_names, - time_steps, - ) - jump_model = IOM.get_jump_model(container) - for name in device_names, t in time_steps - x_var_container[name, t] = - JuMP.@variable(jump_model, base_name = "x_$(name)_$(t)") - y_var_container[name, t] = - JuMP.@variable(jump_model, base_name = "y_$(name)_$(t)") - end - return (; container, x_var_container, y_var_container, jump_model) -end - @testset "Bilinear Approximations" begin @testset "Solver SOS2 Bilinear" begin @testset "Constraint structure" begin diff --git a/test/test_dnmdt_approximations.jl b/test/test_dnmdt_approximations.jl new file mode 100644 index 0000000..ab1d72c --- /dev/null +++ b/test/test_dnmdt_approximations.jl @@ -0,0 +1,471 @@ +const DNMDT_META = "DNMDTTest" +const DNMDT_HYBS_META = "HybSTest" + +@testset "D-NMDT Univariate Approximation" begin + @testset "Binary expansion correctness" begin + names = ["gen1"] + ts = 1:1 + setup = _setup_qa_test(names, ts) + JuMP.set_lower_bound(setup.var_container["gen1", 1], 0.0) + JuMP.set_upper_bound(setup.var_container["gen1", 1], 1.0) + JuMP.fix(setup.var_container["gen1", 1], 0.6; force = true) + + IOM._add_dnmdt_univariate_approx!( + setup.container, MockThermalGen, names, ts, + setup.var_container, 0.0, 1.0, 4, DNMDT_META, + ) + + JuMP.@objective(setup.jump_model, Min, 0) + JuMP.set_optimizer(setup.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup.jump_model) + JuMP.optimize!(setup.jump_model) + + @test JuMP.termination_status(setup.jump_model) == JuMP.OPTIMAL + + xh = IOM.get_variable( + setup.container, IOM.DNMDTScaledVariable(), MockThermalGen, DNMDT_META, + ) + beta = IOM.get_variable( + setup.container, IOM.DNMDTBinaryVariable(), MockThermalGen, DNMDT_META, + ) + dx = IOM.get_variable( + setup.container, IOM.DNMDTResidualVariable(), MockThermalGen, DNMDT_META, + ) + + @test JuMP.value(xh["gen1", 1]) ≈ 0.6 atol = 
1e-8 + reconstructed = + sum(2.0^(-j) * JuMP.value(beta["gen1", j, 1]) for j in 1:4) + + JuMP.value(dx["gen1", 1]) + @test reconstructed ≈ 0.6 atol = 1e-8 + end + + @testset "Relaxation validity (D-NMDT)" begin + test_points = [0.1, 0.3, 0.5, 0.7, 0.9] + for x0 in test_points + z_vals = Float64[] + for sense in [JuMP.MIN_SENSE, JuMP.MAX_SENSE] + setup = _setup_qa_test(["gen1"], 1:1) + JuMP.fix(setup.var_container["gen1", 1], x0; force = true) + + IOM._add_dnmdt_univariate_approx!( + setup.container, MockThermalGen, ["gen1"], 1:1, + setup.var_container, 0.0, 1.0, 3, DNMDT_META; + tighten = false, + ) + expr = IOM.get_expression( + setup.container, IOM.DNMDTQuadraticExpression(), + MockThermalGen, DNMDT_META, + ) + + JuMP.@objective(setup.jump_model, sense, expr["gen1", 1]) + JuMP.set_optimizer(setup.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup.jump_model) + JuMP.optimize!(setup.jump_model) + @test JuMP.termination_status(setup.jump_model) == JuMP.OPTIMAL + push!(z_vals, JuMP.objective_value(setup.jump_model)) + end + true_val = x0^2 + @test z_vals[1] <= true_val + 1e-6 + @test z_vals[2] >= true_val - 1e-6 + end + end + + @testset "Relaxation gap <= 2^(-2L-1)" begin + for L in [2, 3, 4] + max_gap = 0.0 + for x0 in range(0.0, 1.0; length = 11) + z_vals = Float64[] + for sense in [JuMP.MIN_SENSE, JuMP.MAX_SENSE] + setup = _setup_qa_test(["gen1"], 1:1) + JuMP.fix(setup.var_container["gen1", 1], x0; force = true) + + IOM._add_dnmdt_univariate_approx!( + setup.container, MockThermalGen, ["gen1"], 1:1, + setup.var_container, 0.0, 1.0, L, DNMDT_META; + tighten = false, + ) + expr = IOM.get_expression( + setup.container, IOM.DNMDTQuadraticExpression(), + MockThermalGen, DNMDT_META, + ) + + JuMP.@objective(setup.jump_model, sense, expr["gen1", 1]) + JuMP.set_optimizer(setup.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup.jump_model) + JuMP.optimize!(setup.jump_model) + push!(z_vals, JuMP.objective_value(setup.jump_model)) + end + gap = z_vals[2] - z_vals[1] + 
max_gap = max(max_gap, gap) + end + # Relaxation gap (UB - LB) bounded by eps_L^2 / 2 = 2^(-2L-1) + theoretical_bound = 2.0^(-2 * L - 1) + @test max_gap <= theoretical_bound + 1e-6 + end + end + + @testset "Constraint structure" begin + setup = _setup_qa_test(["gen1"], 1:1) + depth = 3 + + IOM._add_dnmdt_univariate_approx!( + setup.container, MockThermalGen, ["gen1"], 1:1, + setup.var_container, 0.0, 1.0, depth, DNMDT_META; + tighten = false, + ) + + # L binary variables for univariate + n_bin = count(JuMP.is_binary, JuMP.all_variables(setup.jump_model)) + @test n_bin == depth + + # Container keys exist + @test IOM.has_container_key( + setup.container, IOM.DNMDTScaledVariable, MockThermalGen, DNMDT_META, + ) + @test IOM.has_container_key( + setup.container, IOM.DNMDTBinaryVariable, MockThermalGen, DNMDT_META, + ) + @test IOM.has_container_key( + setup.container, IOM.DNMDTResidualVariable, MockThermalGen, DNMDT_META, + ) + @test IOM.has_container_key( + setup.container, IOM.DNMDTBackTransformConstraint, MockThermalGen, DNMDT_META, + ) + end + + @testset "Multiple time steps and names" begin + names = ["gen1", "gen2"] + ts = 1:3 + setup = _setup_qa_test(names, ts) + + IOM._add_dnmdt_univariate_approx!( + setup.container, MockThermalGen, names, ts, + setup.var_container, 0.0, 1.0, 2, DNMDT_META; + tighten = false, + ) + expr = IOM.get_expression( + setup.container, IOM.DNMDTQuadraticExpression(), + MockThermalGen, DNMDT_META, + ) + + for name in names, t in ts + @test expr[name, t] isa JuMP.AffExpr + end + end +end + +@testset "T-D-NMDT Tightening" begin + @testset "T-D-NMDT lower bound >= D-NMDT lower bound" begin + for x0 in [0.15, 0.35, 0.65, 0.85] + lb_dnmdt = NaN + lb_tdnmdt = NaN + for tighten in [false, true] + setup = _setup_qa_test(["gen1"], 1:1) + JuMP.fix(setup.var_container["gen1", 1], x0; force = true) + + IOM._add_dnmdt_univariate_approx!( + setup.container, MockThermalGen, ["gen1"], 1:1, + setup.var_container, 0.0, 1.0, 2, DNMDT_META; + tighten = 
tighten, + ) + expr = IOM.get_expression( + setup.container, IOM.DNMDTQuadraticExpression(), + MockThermalGen, DNMDT_META, + ) + + JuMP.@objective(setup.jump_model, Min, expr["gen1", 1]) + JuMP.set_optimizer(setup.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup.jump_model) + JuMP.optimize!(setup.jump_model) + @test JuMP.termination_status(setup.jump_model) == JuMP.OPTIMAL + + if !tighten + lb_dnmdt = JuMP.objective_value(setup.jump_model) + else + lb_tdnmdt = JuMP.objective_value(setup.jump_model) + end + end + # T-D-NMDT should be at least as tight + @test lb_tdnmdt >= lb_dnmdt - 1e-6 + # Both should be valid lower bounds + @test lb_dnmdt <= x0^2 + 1e-6 + @test lb_tdnmdt <= x0^2 + 1e-6 + end + end + + @testset "Convergence with depth" begin + true_val = 0.35^2 + errors = Float64[] + for L in 1:4 + setup = _setup_qa_test(["gen1"], 1:1) + JuMP.fix(setup.var_container["gen1", 1], 0.35; force = true) + + IOM._add_dnmdt_univariate_approx!( + setup.container, MockThermalGen, ["gen1"], 1:1, + setup.var_container, 0.0, 1.0, L, DNMDT_META; + tighten = true, + ) + expr = IOM.get_expression( + setup.container, IOM.DNMDTQuadraticExpression(), + MockThermalGen, DNMDT_META, + ) + + JuMP.@objective(setup.jump_model, Max, expr["gen1", 1]) + JuMP.set_optimizer(setup.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup.jump_model) + JuMP.optimize!(setup.jump_model) + @test JuMP.termination_status(setup.jump_model) == JuMP.OPTIMAL + + push!(errors, abs(JuMP.objective_value(setup.jump_model) - true_val)) + end + for i in 2:length(errors) + @test errors[i] <= errors[i - 1] + 1e-10 + end + end +end + +@testset "D-NMDT Bivariate Approximation" begin + @testset "Relaxation validity" begin + test_points = [(0.3, 0.7), (0.5, 0.5), (0.1, 0.9), (0.8, 0.2)] + for (x0, y0) in test_points + z_vals = Float64[] + for sense in [JuMP.MIN_SENSE, JuMP.MAX_SENSE] + setup = _setup_bilinear_test(["dev1"], 1:1) + JuMP.fix(setup.x_var_container["dev1", 1], x0; force = true) + 
JuMP.fix(setup.y_var_container["dev1", 1], y0; force = true) + + IOM._add_dnmdt_bilinear_approx!( + setup.container, MockThermalGen, ["dev1"], 1:1, + setup.x_var_container, setup.y_var_container, + 0.0, 1.0, 0.0, 1.0, 2, DNMDT_META, + ) + expr = IOM.get_expression( + setup.container, IOM.DNMDTBilinearExpression(), + MockThermalGen, DNMDT_META, + ) + + JuMP.@objective(setup.jump_model, sense, expr["dev1", 1]) + JuMP.set_optimizer(setup.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup.jump_model) + JuMP.optimize!(setup.jump_model) + @test JuMP.termination_status(setup.jump_model) == JuMP.OPTIMAL + push!(z_vals, JuMP.objective_value(setup.jump_model)) + end + true_val = x0 * y0 + @test z_vals[1] <= true_val + 1e-6 + @test z_vals[2] >= true_val - 1e-6 + end + end + + @testset "Relaxation gap <= 2^(-2L-1)" begin + for L in [2, 3] + max_gap = 0.0 + for x0 in range(0.05, 0.95; length = 5) + for y0 in range(0.05, 0.95; length = 5) + z_vals = Float64[] + for sense in [JuMP.MIN_SENSE, JuMP.MAX_SENSE] + setup = _setup_bilinear_test(["dev1"], 1:1) + JuMP.fix(setup.x_var_container["dev1", 1], x0; force = true) + JuMP.fix(setup.y_var_container["dev1", 1], y0; force = true) + + IOM._add_dnmdt_bilinear_approx!( + setup.container, MockThermalGen, ["dev1"], 1:1, + setup.x_var_container, setup.y_var_container, + 0.0, 1.0, 0.0, 1.0, L, DNMDT_META, + ) + expr = IOM.get_expression( + setup.container, IOM.DNMDTBilinearExpression(), + MockThermalGen, DNMDT_META, + ) + + JuMP.@objective(setup.jump_model, sense, expr["dev1", 1]) + JuMP.set_optimizer(setup.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup.jump_model) + JuMP.optimize!(setup.jump_model) + push!(z_vals, JuMP.objective_value(setup.jump_model)) + end + gap = z_vals[2] - z_vals[1] + max_gap = max(max_gap, gap) + end + end + # Relaxation gap (UB - LB) bounded by eps_L^2 / 2 = 2^(-2L-1) + theoretical_bound = 2.0^(-2 * L - 1) + @test max_gap <= theoretical_bound + 1e-6 + end + end + + @testset "General bounds (non-unit 
intervals)" begin + x_min, x_max = 0.2, 0.8 + y_min, y_max = -0.3, 1.5 + test_points = [(0.5, 0.6), (0.3, 1.0), (0.7, -0.1)] + for (x0, y0) in test_points + z_vals = Float64[] + for sense in [JuMP.MIN_SENSE, JuMP.MAX_SENSE] + setup = _setup_bilinear_test(["dev1"], 1:1) + JuMP.fix(setup.x_var_container["dev1", 1], x0; force = true) + JuMP.fix(setup.y_var_container["dev1", 1], y0; force = true) + + IOM._add_dnmdt_bilinear_approx!( + setup.container, MockThermalGen, ["dev1"], 1:1, + setup.x_var_container, setup.y_var_container, + x_min, x_max, y_min, y_max, 3, DNMDT_META, + ) + expr = IOM.get_expression( + setup.container, IOM.DNMDTBilinearExpression(), + MockThermalGen, DNMDT_META, + ) + + JuMP.@objective(setup.jump_model, sense, expr["dev1", 1]) + JuMP.set_optimizer(setup.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup.jump_model) + JuMP.optimize!(setup.jump_model) + @test JuMP.termination_status(setup.jump_model) == JuMP.OPTIMAL + push!(z_vals, JuMP.objective_value(setup.jump_model)) + end + true_val = x0 * y0 + @test z_vals[1] <= true_val + 1e-6 + @test z_vals[2] >= true_val - 1e-6 + end + end + + @testset "Constraint structure" begin + setup = _setup_bilinear_test(["dev1"], 1:1) + depth = 2 + + IOM._add_dnmdt_bilinear_approx!( + setup.container, MockThermalGen, ["dev1"], 1:1, + setup.x_var_container, setup.y_var_container, + 0.0, 1.0, 0.0, 1.0, depth, DNMDT_META, + ) + + # 2L binary variables for bivariate + n_bin = count(JuMP.is_binary, JuMP.all_variables(setup.jump_model)) + @test n_bin == 2 * depth + + # Container keys exist + @test IOM.has_container_key( + setup.container, IOM.DNMDTScaledVariable, MockThermalGen, + DNMDT_META * "_x", + ) + @test IOM.has_container_key( + setup.container, IOM.DNMDTScaledVariable, MockThermalGen, + DNMDT_META * "_y", + ) + @test IOM.has_container_key( + setup.container, IOM.DNMDTBilinearExpression, MockThermalGen, + DNMDT_META, + ) + end + + @testset "McCormick toggle" begin + setup = _setup_bilinear_test(["dev1"], 1:1) + + 
IOM._add_dnmdt_bilinear_approx!( + setup.container, MockThermalGen, ["dev1"], 1:1, + setup.x_var_container, setup.y_var_container, + 0.0, 1.0, 0.0, 1.0, 2, DNMDT_META; + add_mccormick = false, + ) + + @test !IOM.has_container_key( + setup.container, IOM.McCormickConstraint, MockThermalGen, DNMDT_META, + ) + end + + @testset "Fixed-variable correctness" begin + setup = _setup_bilinear_test(["dev1"], 1:1) + JuMP.fix(setup.x_var_container["dev1", 1], 2.0; force = true) + JuMP.fix(setup.y_var_container["dev1", 1], 3.0; force = true) + + IOM._add_dnmdt_bilinear_approx!( + setup.container, MockThermalGen, ["dev1"], 1:1, + setup.x_var_container, setup.y_var_container, + 0.0, 4.0, 0.0, 4.0, 3, DNMDT_META, + ) + expr = IOM.get_expression( + setup.container, IOM.DNMDTBilinearExpression(), + MockThermalGen, DNMDT_META, + ) + + JuMP.@objective(setup.jump_model, Max, expr["dev1", 1]) + JuMP.set_optimizer(setup.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup.jump_model) + JuMP.optimize!(setup.jump_model) + + @test JuMP.termination_status(setup.jump_model) == JuMP.OPTIMAL + @test JuMP.objective_value(setup.jump_model) ≈ 6.0 atol = 0.5 + end + + @testset "Multiple time steps" begin + setup = _setup_bilinear_test(["dev1"], 1:3) + + IOM._add_dnmdt_bilinear_approx!( + setup.container, MockThermalGen, ["dev1"], 1:3, + setup.x_var_container, setup.y_var_container, + 0.0, 1.0, 0.0, 1.0, 2, DNMDT_META, + ) + expr = IOM.get_expression( + setup.container, IOM.DNMDTBilinearExpression(), + MockThermalGen, DNMDT_META, + ) + + for t in 1:3 + @test expr["dev1", t] isa JuMP.AffExpr + end + end + + @testset "D-NMDT vs HybS comparison" begin + true_product = 0.4 * 0.7 + for depth in [2, 3] + # D-NMDT + setup_d = _setup_bilinear_test(["dev1"], 1:1) + JuMP.fix(setup_d.x_var_container["dev1", 1], 0.4; force = true) + JuMP.fix(setup_d.y_var_container["dev1", 1], 0.7; force = true) + + IOM._add_dnmdt_bilinear_approx!( + setup_d.container, MockThermalGen, ["dev1"], 1:1, + setup_d.x_var_container, 
setup_d.y_var_container, + 0.0, 1.0, 0.0, 1.0, depth, DNMDT_META, + ) + expr_d = IOM.get_expression( + setup_d.container, IOM.DNMDTBilinearExpression(), + MockThermalGen, DNMDT_META, + ) + + JuMP.@objective(setup_d.jump_model, Max, expr_d["dev1", 1]) + JuMP.set_optimizer(setup_d.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup_d.jump_model) + JuMP.optimize!(setup_d.jump_model) + dnmdt_gap = abs(JuMP.objective_value(setup_d.jump_model) - true_product) + + # HybS + setup_h = _setup_bilinear_test(["dev1"], 1:1) + JuMP.fix(setup_h.x_var_container["dev1", 1], 0.4; force = true) + JuMP.fix(setup_h.y_var_container["dev1", 1], 0.7; force = true) + + IOM._add_hybs_bilinear_approx!( + setup_h.container, MockThermalGen, ["dev1"], 1:1, + setup_h.x_var_container, setup_h.y_var_container, + 0.0, 1.0, 0.0, 1.0, depth, DNMDT_HYBS_META, + ) + expr_h = IOM.get_expression( + setup_h.container, IOM.BilinearProductExpression(), + MockThermalGen, DNMDT_HYBS_META, + ) + + JuMP.@objective(setup_h.jump_model, Max, expr_h["dev1", 1]) + JuMP.set_optimizer(setup_h.jump_model, HiGHS.Optimizer) + JuMP.set_silent(setup_h.jump_model) + JuMP.optimize!(setup_h.jump_model) + hybs_gap = abs(JuMP.objective_value(setup_h.jump_model) - true_product) + + # D-NMDT should be at least as tight (same binary budget) + @test dnmdt_gap <= hybs_gap + 1e-6 + + # Both use same number of binaries: 2L + n_bin_d = count(JuMP.is_binary, JuMP.all_variables(setup_d.jump_model)) + n_bin_h = count(JuMP.is_binary, JuMP.all_variables(setup_h.jump_model)) + @test n_bin_d == n_bin_h + end + end +end diff --git a/test/test_hybs_approximations.jl b/test/test_hybs_approximations.jl index d235c2c..526d6ff 100644 --- a/test/test_hybs_approximations.jl +++ b/test/test_hybs_approximations.jl @@ -1,4 +1,5 @@ const HYBS_META = "HybSTest" +const HYBS_BILINEAR_META = "BilinearTest" @testset "Epigraph Quadratic Approximation" begin @testset "Constraint structure" begin @@ -14,13 +15,13 @@ const HYBS_META = "HybSTest" 0.0, 4.0, depth, 
- TEST_META, + HYBS_META, ) expr_container = IOM.get_expression( setup.container, IOM.EpigraphExpression(), MockThermalGen, - TEST_META, + HYBS_META, ) @test expr_container["dev1", 1] isa JuMP.AffExpr @@ -44,13 +45,13 @@ const HYBS_META = "HybSTest" 0.0, 1.0, 4, - TEST_META, + HYBS_META, ) expr_container = IOM.get_expression( setup.container, IOM.EpigraphExpression(), MockThermalGen, - TEST_META, + HYBS_META, ) z_epi = expr_container["dev1", 1] @@ -78,13 +79,13 @@ const HYBS_META = "HybSTest" 0.0, 2.0, 4, - TEST_META, + HYBS_META, ) expr_container = IOM.get_expression( setup.container, IOM.EpigraphExpression(), MockThermalGen, - TEST_META, + HYBS_META, ) z_epi = expr_container["dev1", 1] @@ -109,13 +110,13 @@ const HYBS_META = "HybSTest" 0.0, 4.0, 2, - TEST_META, + HYBS_META, ) expr_container = IOM.get_expression( setup.container, IOM.EpigraphExpression(), MockThermalGen, - TEST_META, + HYBS_META, ) for t in 1:3 @@ -140,13 +141,13 @@ const HYBS_META = "HybSTest" 0.0, 1.0, depth, - TEST_META, + HYBS_META, ) expr_container = IOM.get_expression( setup.container, IOM.EpigraphExpression(), MockThermalGen, - TEST_META, + HYBS_META, ) z_epi = expr_container["dev1", 1] @@ -537,7 +538,7 @@ end 0.0, 1.0, depth, - BILINEAR_META, + HYBS_BILINEAR_META, ) n_bin_bin2 = count(JuMP.is_binary, JuMP.all_variables(setup_b.jump_model)) diff --git a/test/test_quadratic_approximations.jl b/test/test_quadratic_approximations.jl index 955bf3e..17fd2ce 100644 --- a/test/test_quadratic_approximations.jl +++ b/test/test_quadratic_approximations.jl @@ -1,34 +1,6 @@ const MOI = JuMP.MOI const TEST_META = "TestVar" -function _setup_qa_container(time_steps::UnitRange{Int}) - sys = MockSystem(100.0) - settings = IOM.Settings( - sys; - horizon = Dates.Hour(length(time_steps)), - resolution = Dates.Hour(1), - ) - container = IOM.OptimizationContainer(sys, settings, JuMP.Model(), IS.Deterministic) - IOM.set_time_steps!(container, time_steps) - return container -end - -function 
_setup_qa_test(device_names::Vector{String}, time_steps::UnitRange{Int}) - container = _setup_qa_container(time_steps) - var_container = IOM.add_variable_container!( - container, - TestOriginalVariable(), - MockThermalGen, - device_names, - time_steps, - ) - jump_model = IOM.get_jump_model(container) - for name in device_names, t in time_steps - var_container[name, t] = JuMP.@variable(jump_model, base_name = "x_$(name)_$(t)",) - end - return (; container, var_container, jump_model) -end - @testset "Quadratic Approximations" begin @testset "Solver SOS2" begin @testset "Constraint structure" begin diff --git a/test/test_utils/qa_bilinear_helpers.jl b/test/test_utils/qa_bilinear_helpers.jl new file mode 100644 index 0000000..275ce6a --- /dev/null +++ b/test/test_utils/qa_bilinear_helpers.jl @@ -0,0 +1,53 @@ +function _setup_qa_container(time_steps::UnitRange{Int}) + sys = MockSystem(100.0) + settings = IOM.Settings( + sys; + horizon = Dates.Hour(length(time_steps)), + resolution = Dates.Hour(1), + ) + container = IOM.OptimizationContainer(sys, settings, JuMP.Model(), IS.Deterministic) + IOM.set_time_steps!(container, time_steps) + return container +end + +function _setup_qa_test(device_names::Vector{String}, time_steps::UnitRange{Int}) + container = _setup_qa_container(time_steps) + var_container = IOM.add_variable_container!( + container, + TestOriginalVariable(), + MockThermalGen, + device_names, + time_steps, + ) + jump_model = IOM.get_jump_model(container) + for name in device_names, t in time_steps + var_container[name, t] = JuMP.@variable(jump_model, base_name = "x_$(name)_$(t)",) + end + return (; container, var_container, jump_model) +end + +function _setup_bilinear_test(device_names::Vector{String}, time_steps::UnitRange{Int}) + container = _setup_qa_container(time_steps) + x_var_container = IOM.add_variable_container!( + container, + TestOriginalVariable(), + MockThermalGen, + device_names, + time_steps, + ) + y_var_container = IOM.add_variable_container!( + 
+        container,
+        TestApproximatedVariable(),
+        MockThermalGen,
+        device_names,
+        time_steps,
+    )
+    jump_model = IOM.get_jump_model(container)
+    for name in device_names, t in time_steps
+        x_var_container[name, t] =
+            JuMP.@variable(jump_model, base_name = "x_$(name)_$(t)")
+        y_var_container[name, t] =
+            JuMP.@variable(jump_model, base_name = "y_$(name)_$(t)")
+    end
+    return (; container, x_var_container, y_var_container, jump_model)
+end