diff --git a/README.md b/README.md index 644b556c..9738a347 100644 --- a/README.md +++ b/README.md @@ -143,13 +143,14 @@ Fri 0 4 * [Cbc](https://projects.coin-or.org/Cbc) * [GLPK](https://www.gnu.org/software/glpk/) -* [HiGHS](https://www.maths.ed.ac.uk/hall/HiGHS/) +* [HiGHS](https://highs.dev/) * [Gurobi](https://www.gurobi.com/) * [Xpress](https://www.fico.com/en/products/fico-xpress-solver) * [Cplex](https://www.ibm.com/de-de/analytics/cplex-optimizer) * [MOSEK](https://www.mosek.com/) * [COPT](https://www.shanshu.ai/copt) * [cuPDLPx](https://github.com/MIT-Lu-Lab/cuPDLPx) +* [Knitro](https://www.artelys.com/solvers/knitro/) Note that these do have to be installed by the user separately. diff --git a/benchmark/benchmark_auto_mask.py b/benchmark/benchmark_auto_mask.py new file mode 100644 index 00000000..d478e950 --- /dev/null +++ b/benchmark/benchmark_auto_mask.py @@ -0,0 +1,639 @@ +#!/usr/bin/env python3 +""" +Benchmark comparing manual masking vs auto_mask for models with NaN coefficients. 
+ +This creates a realistic scenario: a multi-period dispatch model where: +- Not all generators are available in all time periods (NaN in capacity bounds) +- Not all transmission lines exist between all regions (NaN in flow limits) +""" + +import sys +from pathlib import Path + +# Ensure we use the local linopy installation +project_root = Path(__file__).parent.parent +sys.path.insert(0, str(project_root)) + +import time # noqa: E402 +from typing import Any # noqa: E402 + +import numpy as np # noqa: E402 +import pandas as pd # noqa: E402 + +from linopy import GREATER_EQUAL, Model # noqa: E402 + + +def create_nan_data( + n_generators: int = 500, + n_periods: int = 100, + n_regions: int = 20, + nan_fraction_gen: float = 0.3, # 30% of generator-period combinations unavailable + nan_fraction_lines: float = 0.7, # 70% of region pairs have no direct line + seed: int = 42, +) -> dict[str, Any]: + """Create realistic input data with NaN patterns.""" + rng = np.random.default_rng(seed) + + generators = pd.Index(range(n_generators), name="generator") + periods = pd.Index(range(n_periods), name="period") + regions = pd.Index(range(n_regions), name="region") + + # Generator capacities - some generators unavailable in some periods (maintenance, etc.) 
+ gen_capacity = pd.DataFrame( + rng.uniform(50, 500, size=(n_generators, n_periods)), + index=generators, + columns=periods, + ) + # Set random entries to NaN (generator unavailable) + nan_mask_gen = rng.random((n_generators, n_periods)) < nan_fraction_gen + gen_capacity.values[nan_mask_gen] = np.nan + + # Generator costs + gen_cost = pd.Series(rng.uniform(10, 100, n_generators), index=generators) + + # Generator to region mapping + gen_region = pd.Series(rng.integers(0, n_regions, n_generators), index=generators) + + # Demand per region per period + demand = pd.DataFrame( + rng.uniform(100, 1000, size=(n_regions, n_periods)), + index=regions, + columns=periods, + ) + + # Transmission line capacities - sparse network (not all regions connected) + # Use distinct dimension names to avoid xarray duplicate dimension issues + regions_from = pd.Index(range(n_regions), name="region_from") + regions_to = pd.Index(range(n_regions), name="region_to") + + line_capacity = pd.DataFrame( + np.nan, + index=regions_from, + columns=regions_to, + dtype=float, # Start with all NaN + ) + # Only some region pairs have lines + for i in range(n_regions): + for j in range(n_regions): + if i != j and rng.random() > nan_fraction_lines: + line_capacity.loc[i, j] = rng.uniform(100, 500) + + return { + "generators": generators, + "periods": periods, + "regions": regions, + "regions_from": regions_from, + "regions_to": regions_to, + "gen_capacity": gen_capacity, + "gen_cost": gen_cost, + "gen_region": gen_region, + "demand": demand, + "line_capacity": line_capacity, + } + + +def build_model_manual_mask(data: dict[str, Any]) -> Model: + """Build model using manual masking (traditional approach).""" + m = Model() + + generators = data["generators"] + periods = data["periods"] + regions = data["regions"] + regions_from = data["regions_from"] + regions_to = data["regions_to"] + gen_capacity = data["gen_capacity"] + gen_cost = data["gen_cost"] + gen_region = data["gen_region"] + demand = 
data["demand"] + line_capacity = data["line_capacity"] + + # Generator dispatch variables - manually mask where capacity is NaN + gen_mask = gen_capacity.notnull() + dispatch = m.add_variables( + lower=0, + upper=gen_capacity, + coords=[generators, periods], + name="dispatch", + mask=gen_mask, + ) + + # Flow variables between regions - manually mask where no line exists + flow_mask = line_capacity.notnull() + flow = m.add_variables( + lower=-line_capacity.abs(), + upper=line_capacity.abs(), + coords=[regions_from, regions_to], + name="flow", + mask=flow_mask, + ) + + # Energy balance constraint per region per period + for r in regions: + gens_in_region = generators[gen_region == r] + gen_sum = dispatch.loc[gens_in_region, :].sum("generator") + + # Net flow into region + flow_in = flow.loc[:, r].sum("region_from") + flow_out = flow.loc[r, :].sum("region_to") + + m.add_constraints( + gen_sum + flow_in - flow_out, + GREATER_EQUAL, + demand.loc[r], + name=f"balance_r{r}", + ) + + # Objective: minimize generation cost + obj = (dispatch * gen_cost).sum() + m.add_objective(obj) + + return m + + +def build_model_auto_mask(data: dict[str, Any]) -> Model: + """Build model using auto_mask=True (new approach).""" + m = Model(auto_mask=True) + + generators = data["generators"] + periods = data["periods"] + regions = data["regions"] + regions_from = data["regions_from"] + regions_to = data["regions_to"] + gen_capacity = data["gen_capacity"] + gen_cost = data["gen_cost"] + gen_region = data["gen_region"] + demand = data["demand"] + line_capacity = data["line_capacity"] + + # Generator dispatch variables - auto-masked where capacity is NaN + dispatch = m.add_variables( + lower=0, + upper=gen_capacity, # NaN values will be auto-masked + coords=[generators, periods], + name="dispatch", + ) + + # Flow variables between regions - auto-masked where no line exists + flow = m.add_variables( + lower=-line_capacity.abs(), + upper=line_capacity.abs(), # NaN values will be auto-masked + 
coords=[regions_from, regions_to], + name="flow", + ) + + # Energy balance constraint per region per period + for r in regions: + gens_in_region = generators[gen_region == r] + gen_sum = dispatch.loc[gens_in_region, :].sum("generator") + + # Net flow into region + flow_in = flow.loc[:, r].sum("region_from") + flow_out = flow.loc[r, :].sum("region_to") + + m.add_constraints( + gen_sum + flow_in - flow_out, + GREATER_EQUAL, + demand.loc[r], + name=f"balance_r{r}", + ) + + # Objective: minimize generation cost + obj = (dispatch * gen_cost).sum() + m.add_objective(obj) + + return m + + +def build_model_no_mask(data: dict[str, Any]) -> Model: + """Build model WITHOUT any masking (NaN values left in place).""" + m = Model() + + generators = data["generators"] + periods = data["periods"] + regions = data["regions"] + regions_from = data["regions_from"] + regions_to = data["regions_to"] + gen_capacity = data["gen_capacity"] + gen_cost = data["gen_cost"] + gen_region = data["gen_region"] + demand = data["demand"] + line_capacity = data["line_capacity"] + + # Generator dispatch variables - NO masking, NaN bounds left in place + dispatch = m.add_variables( + lower=0, + upper=gen_capacity, # Contains NaN values + coords=[generators, periods], + name="dispatch", + ) + + # Flow variables between regions - NO masking + flow = m.add_variables( + lower=-line_capacity.abs(), + upper=line_capacity.abs(), # Contains NaN values + coords=[regions_from, regions_to], + name="flow", + ) + + # Energy balance constraint per region per period + for r in regions: + gens_in_region = generators[gen_region == r] + gen_sum = dispatch.loc[gens_in_region, :].sum("generator") + + # Net flow into region + flow_in = flow.loc[:, r].sum("region_from") + flow_out = flow.loc[r, :].sum("region_to") + + m.add_constraints( + gen_sum + flow_in - flow_out, + GREATER_EQUAL, + demand.loc[r], + name=f"balance_r{r}", + ) + + # Objective: minimize generation cost + obj = (dispatch * gen_cost).sum() + 
m.add_objective(obj) + + return m + + +def benchmark( + n_generators: int = 500, + n_periods: int = 100, + n_regions: int = 20, + n_runs: int = 3, + solve: bool = True, +) -> dict[str, Any]: + """Run benchmark comparing no masking, manual masking, and auto masking.""" + print("=" * 70) + print("BENCHMARK: No Masking vs Manual Masking vs Auto-Masking") + print("=" * 70) + print("\nModel size:") + print(f" - Generators: {n_generators}") + print(f" - Time periods: {n_periods}") + print(f" - Regions: {n_regions}") + print(f" - Potential dispatch vars: {n_generators * n_periods:,}") + print(f" - Potential flow vars: {n_regions * n_regions:,}") + print(f"\nRunning {n_runs} iterations each...\n") + + # Generate data once + data = create_nan_data( + n_generators=n_generators, + n_periods=n_periods, + n_regions=n_regions, + ) + + # Count NaN entries + gen_nan_count = data["gen_capacity"].isna().sum().sum() + gen_total = data["gen_capacity"].size + line_nan_count = data["line_capacity"].isna().sum().sum() + line_total = data["line_capacity"].size + + print("NaN statistics:") + print( + f" - Generator capacity: {gen_nan_count:,}/{gen_total:,} " + f"({100 * gen_nan_count / gen_total:.1f}% NaN)" + ) + print( + f" - Line capacity: {line_nan_count:,}/{line_total:,} " + f"({100 * line_nan_count / line_total:.1f}% NaN)" + ) + print() + + # Benchmark NO masking (baseline) + no_mask_times = [] + for i in range(n_runs): + start = time.perf_counter() + m_no_mask = build_model_no_mask(data) + elapsed = time.perf_counter() - start + no_mask_times.append(elapsed) + if i == 0: + # Can't use nvars directly as it will fail with NaN values + # Instead count total variable labels (including those with NaN bounds) + no_mask_nvars = sum( + m_no_mask.variables[k].labels.size for k in m_no_mask.variables + ) + no_mask_ncons = m_no_mask.ncons + + # Benchmark manual masking + manual_times = [] + for i in range(n_runs): + start = time.perf_counter() + m_manual = build_model_manual_mask(data) + 
elapsed = time.perf_counter() - start + manual_times.append(elapsed) + if i == 0: + manual_nvars = m_manual.nvars + manual_ncons = m_manual.ncons + + # Benchmark auto masking + auto_times = [] + for i in range(n_runs): + start = time.perf_counter() + m_auto = build_model_auto_mask(data) + elapsed = time.perf_counter() - start + auto_times.append(elapsed) + if i == 0: + auto_nvars = m_auto.nvars + auto_ncons = m_auto.ncons + + # Results + print("-" * 70) + print("RESULTS: Model Building Time") + print("-" * 70) + + print("\nNo masking (baseline):") + print(f" - Mean time: {np.mean(no_mask_times):.3f}s") + print(f" - Variables: {no_mask_nvars:,} (includes NaN-bounded vars)") + print(f" - Constraints: {no_mask_ncons:,}") + + print("\nManual masking:") + print(f" - Mean time: {np.mean(manual_times):.3f}s") + print(f" - Variables: {manual_nvars:,}") + print(f" - Constraints: {manual_ncons:,}") + manual_overhead = np.mean(manual_times) - np.mean(no_mask_times) + print(f" - Overhead vs no-mask: {manual_overhead * 1000:+.1f}ms") + + print("\nAuto masking:") + print(f" - Mean time: {np.mean(auto_times):.3f}s") + print(f" - Variables: {auto_nvars:,}") + print(f" - Constraints: {auto_ncons:,}") + auto_overhead = np.mean(auto_times) - np.mean(no_mask_times) + print(f" - Overhead vs no-mask: {auto_overhead * 1000:+.1f}ms") + + # Comparison + print("\nComparison (Auto vs Manual):") + speedup = np.mean(manual_times) / np.mean(auto_times) + diff = np.mean(auto_times) - np.mean(manual_times) + if speedup > 1: + print(f" - Auto-mask is {speedup:.2f}x FASTER than manual") + else: + print(f" - Auto-mask is {1 / speedup:.2f}x SLOWER than manual") + print(f" - Time difference: {diff * 1000:+.1f}ms") + + # Verify models are equivalent + print("\nVerification:") + print(f" - Manual == Auto variables: {manual_nvars == auto_nvars}") + print(f" - Manual == Auto constraints: {manual_ncons == auto_ncons}") + print(f" - Variables masked out: {no_mask_nvars - manual_nvars:,}") + + results = { + 
"n_generators": n_generators, + "n_periods": n_periods, + "potential_vars": n_generators * n_periods, + "no_mask_time": np.mean(no_mask_times), + "manual_time": np.mean(manual_times), + "auto_time": np.mean(auto_times), + "nvars": manual_nvars, + "masked_out": no_mask_nvars - manual_nvars, + } + + # LP file write benchmark + print("\n" + "-" * 70) + print("RESULTS: LP File Write Time & Size") + print("-" * 70) + + import os + import tempfile + + # Write LP file for manual masked model + with tempfile.NamedTemporaryFile(suffix=".lp", delete=False) as f: + manual_lp_path = f.name + start = time.perf_counter() + m_manual.to_file(manual_lp_path) + manual_write_time = time.perf_counter() - start + manual_lp_size = os.path.getsize(manual_lp_path) / (1024 * 1024) # MB + os.unlink(manual_lp_path) + + # Write LP file for auto masked model + with tempfile.NamedTemporaryFile(suffix=".lp", delete=False) as f: + auto_lp_path = f.name + start = time.perf_counter() + m_auto.to_file(auto_lp_path) + auto_write_time = time.perf_counter() - start + auto_lp_size = os.path.getsize(auto_lp_path) / (1024 * 1024) # MB + os.unlink(auto_lp_path) + + print("\nManual masking:") + print(f" - Write time: {manual_write_time:.3f}s") + print(f" - File size: {manual_lp_size:.2f} MB") + + print("\nAuto masking:") + print(f" - Write time: {auto_write_time:.3f}s") + print(f" - File size: {auto_lp_size:.2f} MB") + + print(f"\nFiles identical: {abs(manual_lp_size - auto_lp_size) < 0.01}") + + results["manual_write_time"] = manual_write_time + results["auto_write_time"] = auto_write_time + results["lp_size_mb"] = manual_lp_size + + # Quick solve comparison + if solve: + print("\n" + "-" * 70) + print("RESULTS: Solve Time (single run)") + print("-" * 70) + + start = time.perf_counter() + m_manual.solve("highs", io_api="direct") + manual_solve = time.perf_counter() - start + + start = time.perf_counter() + m_auto.solve("highs", io_api="direct") + auto_solve = time.perf_counter() - start + + 
print(f"\nManual masking solve: {manual_solve:.3f}s") + print(f"Auto masking solve: {auto_solve:.3f}s") + + if m_manual.objective.value is not None and m_auto.objective.value is not None: + print( + f"Objective values match: " + f"{np.isclose(m_manual.objective.value, m_auto.objective.value)}" + ) + print(f" - Manual: {m_manual.objective.value:.2f}") + print(f" - Auto: {m_auto.objective.value:.2f}") + + return results + + +def benchmark_code_simplicity() -> None: + """Show the code simplicity benefit of auto_mask.""" + print("\n" + "=" * 70) + print("CODE COMPARISON: Manual vs Auto-Mask") + print("=" * 70) + + manual_code = """ +# Manual masking - must create mask explicitly +gen_mask = gen_capacity.notnull() +dispatch = m.add_variables( + lower=0, + upper=gen_capacity, + coords=[generators, periods], + name="dispatch", + mask=gen_mask, # Extra step required +) +""" + + auto_code = """ +# Auto masking - just pass the data with NaN +m = Model(auto_mask=True) +dispatch = m.add_variables( + lower=0, + upper=gen_capacity, # NaN auto-masked + coords=[generators, periods], + name="dispatch", +) +""" + + print("\nManual masking approach:") + print(manual_code) + print("Auto-mask approach:") + print(auto_code) + print("Benefits of auto_mask:") + print(" - Less boilerplate code") + print(" - No need to manually track which arrays have NaN") + print(" - Reduces risk of forgetting to mask") + print(" - Cleaner, more declarative style") + + +def benchmark_constraint_masking(n_runs: int = 3) -> None: + """Benchmark auto-masking of constraints with NaN in RHS.""" + print("\n" + "=" * 70) + print("BENCHMARK: Constraint Auto-Masking (NaN in RHS)") + print("=" * 70) + + n_vars = 1000 + n_constraints = 5000 + nan_fraction = 0.3 + + rng = np.random.default_rng(42) + idx = pd.Index(range(n_vars), name="i") + con_idx = pd.Index(range(n_constraints), name="c") + + # Create RHS with NaN values + rhs = pd.Series(rng.uniform(1, 100, n_constraints), index=con_idx) + nan_mask = 
rng.random(n_constraints) < nan_fraction + rhs.values[nan_mask] = np.nan + + print("\nModel size:") + print(f" - Variables: {n_vars}") + print(f" - Potential constraints: {n_constraints}") + print(f" - NaN in RHS: {nan_mask.sum()} ({100 * nan_fraction:.0f}%)") + print(f"\nRunning {n_runs} iterations each...\n") + + # Manual masking + manual_times = [] + for i in range(n_runs): + start = time.perf_counter() + m = Model() + x = m.add_variables(lower=0, coords=[idx], name="x") + coeffs = pd.DataFrame( + rng.uniform(0.1, 1, (n_constraints, n_vars)), index=con_idx, columns=idx + ) + con_mask = rhs.notnull() # Manual mask creation + m.add_constraints((coeffs * x).sum("i"), GREATER_EQUAL, rhs, mask=con_mask) + m.add_objective(x.sum()) + elapsed = time.perf_counter() - start + manual_times.append(elapsed) + if i == 0: + manual_ncons = m.ncons + + # Auto masking + auto_times = [] + for i in range(n_runs): + start = time.perf_counter() + m = Model(auto_mask=True) + x = m.add_variables(lower=0, coords=[idx], name="x") + coeffs = pd.DataFrame( + rng.uniform(0.1, 1, (n_constraints, n_vars)), index=con_idx, columns=idx + ) + m.add_constraints((coeffs * x).sum("i"), GREATER_EQUAL, rhs) # No mask needed + m.add_objective(x.sum()) + elapsed = time.perf_counter() - start + auto_times.append(elapsed) + if i == 0: + auto_ncons = m.ncons + + print("-" * 70) + print("RESULTS: Constraint Building Time") + print("-" * 70) + print("\nManual masking:") + print(f" - Mean time: {np.mean(manual_times):.3f}s") + print(f" - Active constraints: {manual_ncons:,}") + + print("\nAuto masking:") + print(f" - Mean time: {np.mean(auto_times):.3f}s") + print(f" - Active constraints: {auto_ncons:,}") + + overhead = np.mean(auto_times) - np.mean(manual_times) + print(f"\nOverhead: {overhead * 1000:.1f}ms") + print(f"Same constraint count: {manual_ncons == auto_ncons}") + + +def print_summary_table(results: list[dict[str, Any]]) -> None: + """Print a summary table of all benchmark results.""" + print("\n" 
+ "=" * 110) + print("SUMMARY TABLE: Model Building & LP Write Times") + print("=" * 110) + print( + f"{'Model':<12} {'Pot.Vars':>10} {'Act.Vars':>10} {'Masked':>8} " + f"{'No-Mask':>9} {'Manual':>9} {'Auto':>9} {'Diff':>8} " + f"{'LP Write':>9} {'LP Size':>9}" + ) + print("-" * 110) + for r in results: + name = f"{r['n_generators']}x{r['n_periods']}" + lp_write = r.get("manual_write_time", 0) * 1000 + lp_size = r.get("lp_size_mb", 0) + print( + f"{name:<12} {r['potential_vars']:>10,} {r['nvars']:>10,} " + f"{r['masked_out']:>8,} {r['no_mask_time'] * 1000:>8.0f}ms " + f"{r['manual_time'] * 1000:>8.0f}ms {r['auto_time'] * 1000:>8.0f}ms " + f"{(r['auto_time'] - r['manual_time']) * 1000:>+7.0f}ms " + f"{lp_write:>8.0f}ms {lp_size:>8.1f}MB" + ) + print("-" * 110) + print("Pot.Vars = Potential variables, Act.Vars = Active (non-masked) variables") + print("Masked = Variables masked out due to NaN bounds") + print("Diff = Auto-mask time minus Manual mask time (negative = faster)") + print("LP Write = Time to write LP file, LP Size = LP file size in MB") + + +if __name__ == "__main__": + all_results = [] + + # Run benchmarks with different sizes + print("\n### SMALL MODEL ###") + all_results.append( + benchmark(n_generators=100, n_periods=50, n_regions=10, n_runs=5, solve=False) + ) + + print("\n\n### MEDIUM MODEL ###") + all_results.append( + benchmark(n_generators=500, n_periods=100, n_regions=20, n_runs=3, solve=False) + ) + + print("\n\n### LARGE MODEL ###") + all_results.append( + benchmark(n_generators=1000, n_periods=200, n_regions=30, n_runs=3, solve=False) + ) + + print("\n\n### VERY LARGE MODEL ###") + all_results.append( + benchmark(n_generators=2000, n_periods=500, n_regions=40, n_runs=3, solve=False) + ) + + print("\n\n### EXTRA LARGE MODEL ###") + all_results.append( + benchmark(n_generators=5000, n_periods=500, n_regions=50, n_runs=2, solve=False) + ) + + # Print summary table + print_summary_table(all_results) + + # Run constraint benchmark + 
benchmark_constraint_masking() + + # Show code comparison + benchmark_code_simplicity() diff --git a/doc/api.rst b/doc/api.rst index 6011aa81..57a61e3e 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -18,6 +18,9 @@ Creating a model model.Model.add_variables model.Model.add_constraints model.Model.add_objective + model.Model.add_piecewise_constraints + model.Model.add_disjunctive_piecewise_constraints + piecewise.breakpoints model.Model.linexpr model.Model.remove_constraints diff --git a/doc/conf.py b/doc/conf.py index d33175e1..d7cce91b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -13,7 +13,7 @@ # import os # import sys # sys.path.insert(0, os.path.abspath('.')) -import pkg_resources # part of setuptools +import linopy # -- Project information ----------------------------------------------------- @@ -22,12 +22,9 @@ author = "Fabian Hofmann" # The full version, including alpha/beta/rc tags -version = pkg_resources.get_distribution("linopy").version +version = linopy.__version__ release = "master" if "dev" in version else version -# For some reason is this needed, otherwise autosummary does fail on RTD but not locally -import linopy # noqa - # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be diff --git a/doc/index.rst b/doc/index.rst index bff9fa65..6801aeaf 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -42,7 +42,7 @@ flexible data-handling features: - Support of various solvers - `Cbc `__ - `GLPK `__ - - `HiGHS `__ + - `HiGHS `__ - `MindOpt `__ - `Gurobi `__ - `Xpress `__ @@ -112,6 +112,8 @@ This package is published under MIT license. 
creating-expressions creating-constraints sos-constraints + piecewise-linear-constraints + piecewise-linear-constraints-tutorial manipulating-models testing-framework transport-tutorial diff --git a/doc/piecewise-linear-constraints-tutorial.nblink b/doc/piecewise-linear-constraints-tutorial.nblink new file mode 100644 index 00000000..ea48e11f --- /dev/null +++ b/doc/piecewise-linear-constraints-tutorial.nblink @@ -0,0 +1,3 @@ +{ + "path": "../examples/piecewise-linear-constraints.ipynb" +} diff --git a/doc/piecewise-linear-constraints.rst b/doc/piecewise-linear-constraints.rst new file mode 100644 index 00000000..b4c6336d --- /dev/null +++ b/doc/piecewise-linear-constraints.rst @@ -0,0 +1,384 @@ +.. _piecewise-linear-constraints: + +Piecewise Linear Constraints +============================ + +Piecewise linear (PWL) constraints approximate nonlinear functions as connected +linear segments, allowing you to model cost curves, efficiency curves, or +production functions within a linear programming framework. + +Linopy provides two methods: + +- :py:meth:`~linopy.model.Model.add_piecewise_constraints` -- for + **continuous** piecewise linear functions (segments connected end-to-end). +- :py:meth:`~linopy.model.Model.add_disjunctive_piecewise_constraints` -- for + **disconnected** segments (with gaps between them). + +.. contents:: + :local: + :depth: 2 + +Formulations +------------ + +SOS2 (Convex Combination) +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Given breakpoints :math:`b_0, b_1, \ldots, b_n`, the SOS2 formulation +introduces interpolation variables :math:`\lambda_i` such that: + +.. math:: + + \lambda_i \in [0, 1], \quad + \sum_{i=0}^{n} \lambda_i = 1, \quad + x = \sum_{i=0}^{n} \lambda_i \, b_i + +The SOS2 constraint ensures that **at most two adjacent** :math:`\lambda_i` can +be non-zero, so :math:`x` is interpolated within one segment. 
+ +**Dict (multi-variable) case.** When multiple variables share the same lambdas, +breakpoints carry an extra *link* dimension :math:`v \in V` and linking becomes +:math:`x_v = \sum_i \lambda_i \, b_{v,i}` for all :math:`v`. + +.. note:: + + SOS2 is a combinatorial constraint handled via branch-and-bound, similar to + integer variables. It cannot be reformulated as a pure LP. Prefer the + incremental method (``method="incremental"`` or ``method="auto"``) when + breakpoints are monotonic. + +Incremental (Delta) Formulation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For **strictly monotonic** breakpoints :math:`b_0 < b_1 < \cdots < b_n`, the +incremental formulation is a **pure LP** (no SOS2 or binary variables): + +.. math:: + + \delta_i \in [0, 1], \quad + \delta_{i+1} \le \delta_i, \quad + x = b_0 + \sum_{i=1}^{n} \delta_i \, (b_i - b_{i-1}) + +The filling-order constraints enforce that segment :math:`i+1` cannot be +partially filled unless segment :math:`i` is completely filled. + +**Limitation:** Breakpoints must be strictly monotonic for every linked +variable. In the dict case, each variable is checked independently -- e.g. +power increasing while fuel decreases is fine, but a curve that rises then +falls is not. For non-monotonic curves, use SOS2. + +Disjunctive (Disaggregated Convex Combination) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For **disconnected segments** (with gaps), the disjunctive formulation selects +exactly one segment via binary indicators and applies SOS2 within it. No big-M +constants are needed, giving a tight LP relaxation. + +Given :math:`K` segments, each with breakpoints :math:`b_{k,0}, \ldots, b_{k,n_k}`: + +.. math:: + + y_k \in \{0, 1\}, \quad \sum_{k} y_k = 1 + + \lambda_{k,i} \in [0, 1], \quad + \sum_{i} \lambda_{k,i} = y_k, \quad + x = \sum_{k} \sum_{i} \lambda_{k,i} \, b_{k,i} + +.. 
_choosing-a-formulation: + +Choosing a Formulation +~~~~~~~~~~~~~~~~~~~~~~ + +The incremental method is the fastest to solve (pure LP), but requires strictly +monotonic breakpoints. Pass ``method="auto"`` to use it automatically when +applicable, falling back to SOS2 otherwise. + +.. list-table:: + :header-rows: 1 + :widths: 25 25 25 25 + + * - Property + - SOS2 + - Incremental + - Disjunctive + * - Segments + - Connected + - Connected + - Disconnected (gaps allowed) + * - Breakpoint order + - Any + - Strictly monotonic + - Any (per segment) + * - Variable types + - Continuous + SOS2 + - Continuous only (pure LP) + - Binary + SOS2 + * - Solver support + - Solvers with SOS2 support + - **Any LP solver** + - Solvers with SOS2 + MIP support + +Basic Usage +----------- + +Single variable +~~~~~~~~~~~~~~~ + +.. code-block:: python + + import linopy + + m = linopy.Model() + x = m.add_variables(name="x") + + bp = linopy.breakpoints([0, 10, 50, 100]) + m.add_piecewise_constraints(x, bp, dim="breakpoint") + +Dict of variables +~~~~~~~~~~~~~~~~~~ + +Link multiple variables through shared interpolation weights. For example, a +turbine where power input determines power output (via a nonlinear efficiency +factor): + +.. code-block:: python + + m = linopy.Model() + + power_in = m.add_variables(name="power_in") + power_out = m.add_variables(name="power_out") + + bp = linopy.breakpoints( + power_in=[0, 50, 100], + power_out=[0, 47.5, 90], + ) + + m.add_piecewise_constraints( + {"power_in": power_in, "power_out": power_out}, + bp, + dim="breakpoint", + ) + +Incremental method +~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + m.add_piecewise_constraints(x, bp, dim="breakpoint", method="incremental") + +Pass ``method="auto"`` to automatically select incremental when breakpoints are +strictly monotonic, falling back to SOS2 otherwise. + +Disjunctive (disconnected segments) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
code-block:: python + + m = linopy.Model() + x = m.add_variables(name="x") + + bp = linopy.breakpoints.segments([(0, 10), (50, 100)]) + m.add_disjunctive_piecewise_constraints(x, bp) + +Breakpoints Factory +------------------- + +The ``linopy.breakpoints()`` factory simplifies creating breakpoint DataArrays +with correct dimensions and coordinates. + +From a list +~~~~~~~~~~~ + +.. code-block:: python + + # 1D breakpoints (dims: [breakpoint]) + bp = linopy.breakpoints([0, 50, 100]) + +From keyword arguments (multi-variable) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + # 2D breakpoints (dims: [var, breakpoint]) + bp = linopy.breakpoints(power=[0, 50, 100], fuel=[0, 60, 140]) + +From a dict (per-entity, ragged lengths allowed) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + # 2D breakpoints (dims: [generator, breakpoint]), NaN-padded + bp = linopy.breakpoints( + {"gen1": [0, 50, 100], "gen2": [0, 80]}, + dim="generator", + ) + +Per-entity with multiple variables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + # 3D breakpoints (dims: [generator, var, breakpoint]) + bp = linopy.breakpoints( + power={"gen1": [0, 50, 100], "gen2": [0, 80]}, + fuel={"gen1": [0, 60, 140], "gen2": [0, 100]}, + dim="generator", + ) + +Segments (for disjunctive constraints) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + # 2D breakpoints (dims: [segment, breakpoint]) + bp = linopy.breakpoints.segments([(0, 10), (50, 100)]) + + # Per-entity segments + bp = linopy.breakpoints.segments( + {"gen1": [(0, 10), (50, 100)], "gen2": [(0, 80)]}, + dim="generator", + ) + +Auto-broadcasting +----------------- + +Breakpoints are automatically broadcast to match the dimensions of the +expression or variable. This means you don't need to manually call +``expand_dims`` when your variables have extra dimensions (e.g. ``time``): + +.. 
code-block:: python + + m = linopy.Model() + time = pd.Index([1, 2, 3], name="time") + x = m.add_variables(name="x", coords=[time]) + + # 1D breakpoints are auto-expanded to match x's time dimension + bp = linopy.breakpoints([0, 50, 100]) + m.add_piecewise_constraints(x, bp, dim="breakpoint") + +This also works for ``add_disjunctive_piecewise_constraints`` and dict +expressions. + +Method Signatures +----------------- + +``add_piecewise_constraints`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + Model.add_piecewise_constraints( + expr, + breakpoints, + dim="breakpoint", + mask=None, + name=None, + skip_nan_check=False, + method="sos2", + ) + +- ``expr`` -- ``Variable``, ``LinearExpression``, or ``dict`` of these. +- ``breakpoints`` -- ``xr.DataArray`` with breakpoint values. Must have ``dim`` + as a dimension. For the dict case, must also have a dimension whose + coordinates match the dict keys. +- ``dim`` -- ``str``, default ``"breakpoint"``. Breakpoint-index dimension. +- ``mask`` -- ``xr.DataArray``, optional. Boolean mask for valid constraints. +- ``name`` -- ``str``, optional. Base name for generated variables/constraints. +- ``skip_nan_check`` -- ``bool``, default ``False``. +- ``method`` -- ``"sos2"`` (default), ``"incremental"``, or ``"auto"``. + +``add_disjunctive_piecewise_constraints`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + Model.add_disjunctive_piecewise_constraints( + expr, + breakpoints, + dim="breakpoint", + segment_dim="segment", + mask=None, + name=None, + skip_nan_check=False, + ) + +Same as above, plus: + +- ``segment_dim`` -- ``str``, default ``"segment"``. Dimension indexing + segments. Use NaN in breakpoints to pad segments with fewer breakpoints. + +Generated Variables and Constraints +------------------------------------ + +Given base name ``name``, the following objects are created: + +**SOS2 method:** + +.. 
list-table:: + :header-rows: 1 + :widths: 30 15 55 + + * - Name + - Type + - Description + * - ``{name}_lambda`` + - Variable + - Interpolation weights :math:`\lambda_i \in [0, 1]` (SOS2). + * - ``{name}_convex`` + - Constraint + - :math:`\sum_i \lambda_i = 1`. + * - ``{name}_link`` + - Constraint + - :math:`x = \sum_i \lambda_i \, b_i`. + +**Incremental method:** + +.. list-table:: + :header-rows: 1 + :widths: 30 15 55 + + * - Name + - Type + - Description + * - ``{name}_delta`` + - Variable + - Fill-fraction variables :math:`\delta_i \in [0, 1]`. + * - ``{name}_fill`` + - Constraint + - :math:`\delta_{i+1} \le \delta_i` (only if 3+ breakpoints). + * - ``{name}_link`` + - Constraint + - :math:`x = b_0 + \sum_i \delta_i \, s_i`. + +**Disjunctive method:** + +.. list-table:: + :header-rows: 1 + :widths: 30 15 55 + + * - Name + - Type + - Description + * - ``{name}_binary`` + - Variable + - Segment indicators :math:`y_k \in \{0, 1\}`. + * - ``{name}_select`` + - Constraint + - :math:`\sum_k y_k = 1`. + * - ``{name}_lambda`` + - Variable + - Per-segment interpolation weights (SOS2). + * - ``{name}_convex`` + - Constraint + - :math:`\sum_i \lambda_{k,i} = y_k`. + * - ``{name}_link`` + - Constraint + - :math:`x = \sum_k \sum_i \lambda_{k,i} \, b_{k,i}`. 
+ +See Also +-------- + +- :doc:`piecewise-linear-constraints-tutorial` -- Worked examples with all three formulations +- :doc:`sos-constraints` -- Low-level SOS1/SOS2 constraint API +- :doc:`creating-constraints` -- General constraint creation +- :doc:`user-guide` -- Overall linopy usage patterns diff --git a/doc/prerequisites.rst b/doc/prerequisites.rst index 23b17897..97d51296 100644 --- a/doc/prerequisites.rst +++ b/doc/prerequisites.rst @@ -35,7 +35,7 @@ CPU-based solvers - `Cbc `__ - open source, free, fast - `GLPK `__ - open source, free, not very fast -- `HiGHS `__ - open source, free, fast +- `HiGHS `__ - open source, free, fast - `Gurobi `__ - closed source, commercial, very fast - `Xpress `__ - closed source, commercial, very fast (GPU acceleration available in v9.8+) - `Cplex `__ - closed source, commercial, very fast diff --git a/doc/release_notes.rst b/doc/release_notes.rst index b727c22d..59b4456f 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -4,8 +4,41 @@ Release Notes Upcoming Version ---------------- -* Fix docs (pick highs solver) +* Add ``add_piecewise_constraints()`` for piecewise linear constraints with SOS2 and incremental (pure LP) formulations. +* Add ``add_disjunctive_piecewise_constraints()`` for disconnected piecewise linear segments (e.g. forbidden operating zones). +* Add ``linopy.breakpoints()`` factory for convenient breakpoint construction from lists, dicts, or keyword arguments. Includes ``breakpoints.segments()`` for disjunctive formulations. * Add the `sphinx-copybutton` to the documentation +* Add SOS1 and SOS2 reformulations for solvers not supporting them. + + +Version 0.6.4 +-------------- + +* Add support for the `knitro` solver via the knitro python API + +Version 0.6.3 +-------------- + +**Fix Regression** + +* Reinsert broadcasting logic of mask object to be fully compatible with performance improvements in version 0.6.2 using `np.where` instead of `xr.where`. 
+ + +Version 0.6.2 +-------------- + +**Features** + +* Add ``auto_mask`` parameter to ``Model`` class that automatically masks variables and constraints where bounds, coefficients, or RHS values contain NaN. This eliminates the need to manually create mask arrays when working with sparse or incomplete data. + +**Performance** + +* Speed up LP file writing by 2-2.7x on large models through Polars streaming engine, join-based constraint assembly, and reduced per-constraint overhead + +**Bug Fixes** + +* Fix multiplication of constant-only ``LinearExpression`` with other expressions +* Fix docs and Gurobi license handling Version 0.6.1 -------------- @@ -657,7 +690,7 @@ Version 0.0.5 * The `Variable` class now has a `lower` and `upper` accessor, which allows to inspect and modify the lower and upper bounds of a assigned variable. * The `Constraint` class now has a `lhs`, `vars`, `coeffs`, `rhs` and `sign` accessor, which allows to inspect and modify the left-hand-side, the signs and right-hand-side of a assigned constraint. * Constraints can now be build combining linear expressions with right-hand-side via a `>=`, `<=` or a `==` operator. This creates an `AnonymousConstraint` which can be passed to `Model.add_constraints`. -* Add support of the HiGHS open source solver https://www.maths.ed.ac.uk/hall/HiGHS/ (https://github.com/PyPSA/linopy/pull/8, https://github.com/PyPSA/linopy/pull/17). +* Add support of the HiGHS open source solver https://highs.dev/ (https://github.com/PyPSA/linopy/pull/8, https://github.com/PyPSA/linopy/pull/17). **Breaking changes** diff --git a/doc/sos-constraints.rst b/doc/sos-constraints.rst index 37dd72d2..caa4b5e5 100644 --- a/doc/sos-constraints.rst +++ b/doc/sos-constraints.rst @@ -75,7 +75,7 @@ Method Signature .. 
code-block:: python - Model.add_sos_constraints(variable, sos_type, sos_dim) + Model.add_sos_constraints(variable, sos_type, sos_dim, big_m=None) **Parameters:** @@ -85,6 +85,8 @@ Method Signature Type of SOS constraint (1 or 2) - ``sos_dim`` : str Name of the dimension along which the SOS constraint applies +- ``big_m`` : float | None + Custom Big-M value for reformulation (see :ref:`sos-reformulation`) **Requirements:** @@ -254,12 +256,94 @@ SOS constraints are supported by most modern mixed-integer programming solvers t - MOSEK - MindOpt +For unsupported solvers, use automatic reformulation (see below). + +.. _sos-reformulation: + +SOS Reformulation +----------------- + +For solvers without native SOS support, linopy can reformulate SOS constraints +as binary + linear constraints using the Big-M method. + +.. code-block:: python + + # Automatic reformulation during solve + m.solve(solver_name="highs", reformulate_sos=True) + + # Or reformulate manually + m.reformulate_sos_constraints() + m.solve(solver_name="highs") + +**Requirements:** + +- Variables must have **non-negative lower bounds** (lower >= 0) +- Big-M values are derived from variable upper bounds +- For infinite upper bounds, specify custom values via the ``big_m`` parameter + +.. code-block:: python + + # Finite bounds (default) + x = m.add_variables(lower=0, upper=100, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + # Infinite upper bounds: specify Big-M + x = m.add_variables(lower=0, upper=np.inf, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i", big_m=10) + +The reformulation uses the tighter of ``big_m`` and variable upper bound. + +Mathematical Formulation +~~~~~~~~~~~~~~~~~~~~~~~~ + +**SOS1 Reformulation:** + +Original constraint: :math:`\text{SOS1}(\{x_1, x_2, \ldots, x_n\})` means at most one +:math:`x_i` can be non-zero. 
+ +Given :math:`x = (x_1, \ldots, x_n) \in \mathbb{R}^n_+`, introduce binary +:math:`y = (y_1, \ldots, y_n) \in \{0,1\}^n`: + +.. math:: + + x_i &\leq M_i \cdot y_i \quad \forall i \in \{1, \ldots, n\} \\ + x_i &\geq 0 \quad \forall i \in \{1, \ldots, n\} \\ + \sum_{i=1}^{n} y_i &\leq 1 \\ + y_i &\in \{0, 1\} \quad \forall i \in \{1, \ldots, n\} + +where :math:`M_i \geq \sup\{x_i\}` (upper bound on :math:`x_i`). + +**SOS2 Reformulation:** + +Original constraint: :math:`\text{SOS2}(\{x_1, x_2, \ldots, x_n\})` means at most two +:math:`x_i` can be non-zero, and if two are non-zero, they must have consecutive indices. + +Given :math:`x = (x_1, \ldots, x_n) \in \mathbb{R}^n_+`, introduce binary +:math:`y = (y_1, \ldots, y_{n-1}) \in \{0,1\}^{n-1}`: + +.. math:: + + x_1 &\leq M_1 \cdot y_1 \\ + x_i &\leq M_i \cdot (y_{i-1} + y_i) \quad \forall i \in \{2, \ldots, n-1\} \\ + x_n &\leq M_n \cdot y_{n-1} \\ + x_i &\geq 0 \quad \forall i \in \{1, \ldots, n\} \\ + \sum_{i=1}^{n-1} y_i &\leq 1 \\ + y_i &\in \{0, 1\} \quad \forall i \in \{1, \ldots, n-1\} + +where :math:`M_i \geq \sup\{x_i\}`. Interpretation: :math:`y_i = 1` activates interval +:math:`[i, i+1]`, allowing :math:`x_i` and :math:`x_{i+1}` to be non-zero. + Common Patterns --------------- Piecewise Linear Cost Function ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. note:: + + For a higher-level API that handles all the SOS2 bookkeeping automatically, + see :doc:`piecewise-linear-constraints`. + .. code-block:: python def add_piecewise_cost(model, variable, breakpoints, costs): diff --git a/examples/piecewise-linear-constraints.ipynb b/examples/piecewise-linear-constraints.ipynb new file mode 100644 index 00000000..dd9192b3 --- /dev/null +++ b/examples/piecewise-linear-constraints.ipynb @@ -0,0 +1,541 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "intro", + "metadata": {}, + "source": [ + "# Piecewise Linear Constraints\n", + "\n", + "This notebook demonstrates linopy's three PWL formulations. 
Each example\n", + "builds a separate dispatch model where a single power plant must meet\n", + "a time-varying demand.\n", + "\n", + "| Example | Plant | Limitation | Formulation |\n", + "|---------|-------|------------|-------------|\n", + "| 1 | Gas turbine (0–100 MW) | Convex heat rate | SOS2 |\n", + "| 2 | Coal plant (0–150 MW) | Monotonic heat rate | Incremental |\n", + "| 3 | Diesel generator (off or 50–80 MW) | Forbidden zone | Disjunctive |" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "imports", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:33.511970Z", + "start_time": "2026-02-09T19:21:33.501473Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:41.350637Z", + "iopub.status.busy": "2026-02-09T19:21:41.350440Z", + "iopub.status.idle": "2026-02-09T19:21:42.583457Z", + "shell.execute_reply": "2026-02-09T19:21:42.583146Z" + } + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import pandas as pd\n", + "import xarray as xr\n", + "\n", + "import linopy\n", + "\n", + "time = pd.Index([1, 2, 3], name=\"time\")\n", + "\n", + "\n", + "def plot_pwl_results(model, breakpoints, demand, color=\"C0\", fuel_rate=None):\n", + " \"\"\"Plot PWL curve with operating points and dispatch vs demand.\"\"\"\n", + " sol = model.solution\n", + " bp = breakpoints.to_pandas()\n", + " fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3.5))\n", + "\n", + " # Left: PWL curve with operating points\n", + " if \"var\" in breakpoints.dims:\n", + " # Connected: power-fuel curve from var dimension\n", + " ax1.plot(\n", + " bp.loc[\"power\"], bp.loc[\"fuel\"], \"o-\", color=color, label=\"Breakpoints\"\n", + " )\n", + " for t in time:\n", + " ax1.plot(\n", + " sol[\"power\"].sel(time=t),\n", + " sol[\"fuel\"].sel(time=t),\n", + " \"s\",\n", + " ms=10,\n", + " label=f\"t={t}\",\n", + " )\n", + " ax1.set(xlabel=\"Power (MW)\", ylabel=\"Fuel (MWh)\", title=\"Heat rate curve\")\n", + " else:\n", + " # 
Disconnected: segments with linear cost\n", + " for seg in bp.index:\n", + " lo, hi = bp.loc[seg]\n", + " pw = [lo, hi] if lo != hi else [lo]\n", + " ax1.plot(\n", + " pw,\n", + " [fuel_rate * p for p in pw],\n", + " \"o-\",\n", + " color=color,\n", + " label=\"Breakpoints\" if seg == 0 else None,\n", + " )\n", + " ax1.axvspan(\n", + " bp.iloc[0, 1] + 0.5,\n", + " bp.iloc[1, 0] - 0.5,\n", + " color=\"red\",\n", + " alpha=0.1,\n", + " label=\"Forbidden zone\",\n", + " )\n", + " for t in time:\n", + " p = float(sol[\"power\"].sel(time=t))\n", + " ax1.plot(p, fuel_rate * p, \"s\", ms=10, label=f\"t={t}\")\n", + " ax1.set(xlabel=\"Power (MW)\", ylabel=\"Cost\", title=\"Cost curve\")\n", + " ax1.legend()\n", + "\n", + " # Right: dispatch vs demand\n", + " x = list(range(len(time)))\n", + " power_vals = sol[\"power\"].values\n", + " ax2.bar(x, power_vals, color=color, label=\"Power\")\n", + " if \"backup\" in sol:\n", + " ax2.bar(\n", + " x,\n", + " sol[\"backup\"].values,\n", + " bottom=power_vals,\n", + " color=\"C3\",\n", + " alpha=0.5,\n", + " label=\"Backup\",\n", + " )\n", + " ax2.step(\n", + " [v - 0.5 for v in x] + [x[-1] + 0.5],\n", + " list(demand.values) + [demand.values[-1]],\n", + " where=\"post\",\n", + " color=\"black\",\n", + " lw=2,\n", + " label=\"Demand\",\n", + " )\n", + " ax2.set(\n", + " xlabel=\"Time\", ylabel=\"MW\", title=\"Dispatch\", xticks=x, xticklabels=time.values\n", + " )\n", + " ax2.legend()\n", + " plt.tight_layout()" + ] + }, + { + "cell_type": "markdown", + "id": "sos2-md", + "metadata": {}, + "source": [ + "## 1. SOS2 formulation — Gas turbine\n", + "\n", + "The gas turbine has a **convex** heat rate: efficient at moderate load,\n", + "increasingly fuel-hungry at high output. We use the **SOS2** formulation\n", + "to link power output and fuel consumption." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "sos2-setup", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:33.525641Z", + "start_time": "2026-02-09T19:21:33.516874Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:42.585470Z", + "iopub.status.busy": "2026-02-09T19:21:42.585263Z", + "iopub.status.idle": "2026-02-09T19:21:42.639106Z", + "shell.execute_reply": "2026-02-09T19:21:42.638745Z" + } + }, + "outputs": [], + "source": [ + "breakpoints = linopy.breakpoints(power=[0, 30, 60, 100], fuel=[0, 36, 84, 170])\n", + "breakpoints.to_pandas()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "df198d44e962132f", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:33.584017Z", + "start_time": "2026-02-09T19:21:33.548479Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:42.640305Z", + "iopub.status.busy": "2026-02-09T19:21:42.640145Z", + "iopub.status.idle": "2026-02-09T19:21:42.676689Z", + "shell.execute_reply": "2026-02-09T19:21:42.676404Z" + } + }, + "outputs": [], + "source": [ + "m1 = linopy.Model()\n", + "\n", + "power = m1.add_variables(name=\"power\", lower=0, upper=100, coords=[time])\n", + "fuel = m1.add_variables(name=\"fuel\", lower=0, coords=[time])\n", + "\n", + "# breakpoints are auto-broadcast to match the time dimension\n", + "m1.add_piecewise_constraints(\n", + " {\"power\": power, \"fuel\": fuel},\n", + " breakpoints,\n", + " dim=\"breakpoint\",\n", + " name=\"pwl\",\n", + " method=\"sos2\",\n", + ")\n", + "\n", + "demand1 = xr.DataArray([50, 80, 30], coords=[time])\n", + "m1.add_constraints(power >= demand1, name=\"demand\")\n", + "m1.add_objective(fuel.sum())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "sos2-solve", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:33.646228Z", + "start_time": "2026-02-09T19:21:33.602890Z" + }, + "execution": { + "iopub.execute_input": 
"2026-02-09T19:21:42.678723Z", + "iopub.status.busy": "2026-02-09T19:21:42.678455Z", + "iopub.status.idle": "2026-02-09T19:21:42.729810Z", + "shell.execute_reply": "2026-02-09T19:21:42.729268Z" + } + }, + "outputs": [], + "source": [ + "m1.solve()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "sos2-results", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:33.671517Z", + "start_time": "2026-02-09T19:21:33.665702Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:42.732333Z", + "iopub.status.busy": "2026-02-09T19:21:42.732173Z", + "iopub.status.idle": "2026-02-09T19:21:42.737877Z", + "shell.execute_reply": "2026-02-09T19:21:42.737648Z" + } + }, + "outputs": [], + "source": [ + "m1.solution[[\"power\", \"fuel\"]].to_pandas()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "hcqytsfoaa", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:33.802613Z", + "start_time": "2026-02-09T19:21:33.695925Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:42.739144Z", + "iopub.status.busy": "2026-02-09T19:21:42.738977Z", + "iopub.status.idle": "2026-02-09T19:21:42.983660Z", + "shell.execute_reply": "2026-02-09T19:21:42.982758Z" + } + }, + "outputs": [], + "source": [ + "plot_pwl_results(m1, breakpoints, demand1, color=\"C0\")" + ] + }, + { + "cell_type": "markdown", + "id": "incremental-md", + "metadata": {}, + "source": [ + "## 2. Incremental formulation — Coal plant\n", + "\n", + "The coal plant has a **monotonically increasing** heat rate. Since all\n", + "breakpoints are strictly monotonic, we can use the **incremental**\n", + "formulation — a pure LP with no SOS2 or binary variables." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "incremental-setup", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:33.829667Z", + "start_time": "2026-02-09T19:21:33.825683Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:42.987305Z", + "iopub.status.busy": "2026-02-09T19:21:42.986204Z", + "iopub.status.idle": "2026-02-09T19:21:43.003874Z", + "shell.execute_reply": "2026-02-09T19:21:42.998265Z" + } + }, + "outputs": [], + "source": [ + "breakpoints = linopy.breakpoints(power=[0, 50, 100, 150], fuel=[0, 55, 130, 225])\n", + "breakpoints.to_pandas()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8nq1zqvq9re", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:33.913679Z", + "start_time": "2026-02-09T19:21:33.855910Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:43.009748Z", + "iopub.status.busy": "2026-02-09T19:21:43.009216Z", + "iopub.status.idle": "2026-02-09T19:21:43.067070Z", + "shell.execute_reply": "2026-02-09T19:21:43.066402Z" + } + }, + "outputs": [], + "source": [ + "m2 = linopy.Model()\n", + "\n", + "power = m2.add_variables(name=\"power\", lower=0, upper=150, coords=[time])\n", + "fuel = m2.add_variables(name=\"fuel\", lower=0, coords=[time])\n", + "\n", + "# breakpoints are auto-broadcast to match the time dimension\n", + "m2.add_piecewise_constraints(\n", + " {\"power\": power, \"fuel\": fuel},\n", + " breakpoints,\n", + " dim=\"breakpoint\",\n", + " name=\"pwl\",\n", + " method=\"incremental\",\n", + ")\n", + "\n", + "demand2 = xr.DataArray([80, 120, 50], coords=[time])\n", + "m2.add_constraints(power >= demand2, name=\"demand\")\n", + "m2.add_objective(fuel.sum())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "incremental-solve", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:33.981694Z", + "start_time": "2026-02-09T19:21:33.933519Z" + }, + "execution": { + "iopub.execute_input": 
"2026-02-09T19:21:43.070384Z", + "iopub.status.busy": "2026-02-09T19:21:43.070023Z", + "iopub.status.idle": "2026-02-09T19:21:43.124118Z", + "shell.execute_reply": "2026-02-09T19:21:43.123883Z" + } + }, + "outputs": [], + "source": [ + "m2.solve();" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "incremental-results", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:33.991781Z", + "start_time": "2026-02-09T19:21:33.986137Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:43.125356Z", + "iopub.status.busy": "2026-02-09T19:21:43.125291Z", + "iopub.status.idle": "2026-02-09T19:21:43.129072Z", + "shell.execute_reply": "2026-02-09T19:21:43.128850Z" + } + }, + "outputs": [], + "source": [ + "m2.solution[[\"power\", \"fuel\"]].to_pandas()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fua98r986pl", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:34.116658Z", + "start_time": "2026-02-09T19:21:34.021992Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:43.130293Z", + "iopub.status.busy": "2026-02-09T19:21:43.130221Z", + "iopub.status.idle": "2026-02-09T19:21:43.281657Z", + "shell.execute_reply": "2026-02-09T19:21:43.281256Z" + } + }, + "outputs": [], + "source": [ + "plot_pwl_results(m2, breakpoints, demand2, color=\"C1\")" + ] + }, + { + "cell_type": "markdown", + "id": "disjunctive-md", + "metadata": {}, + "source": [ + "## 3. Disjunctive formulation — Diesel generator\n", + "\n", + "The diesel generator has a **forbidden operating zone**: it must either\n", + "be off (0 MW) or run between 50–80 MW. Because of this gap, we add a\n", + "high-cost **backup** source to cover demand when the diesel is off or at\n", + "its maximum." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "disjunctive-setup", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:34.147920Z", + "start_time": "2026-02-09T19:21:34.142740Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:43.283679Z", + "iopub.status.busy": "2026-02-09T19:21:43.283490Z", + "iopub.status.idle": "2026-02-09T19:21:43.290429Z", + "shell.execute_reply": "2026-02-09T19:21:43.289665Z" + } + }, + "outputs": [], + "source": [ + "breakpoints = linopy.breakpoints.segments([(0, 0), (50, 80)])\n", + "breakpoints.to_pandas()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "reevc7ood3", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:34.234326Z", + "start_time": "2026-02-09T19:21:34.188461Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:43.293229Z", + "iopub.status.busy": "2026-02-09T19:21:43.292936Z", + "iopub.status.idle": "2026-02-09T19:21:43.363049Z", + "shell.execute_reply": "2026-02-09T19:21:43.362442Z" + } + }, + "outputs": [], + "source": [ + "m3 = linopy.Model()\n", + "\n", + "power = m3.add_variables(name=\"power\", lower=0, upper=80, coords=[time])\n", + "backup = m3.add_variables(name=\"backup\", lower=0, coords=[time])\n", + "\n", + "# breakpoints are auto-broadcast to match the time dimension\n", + "m3.add_disjunctive_piecewise_constraints(power, breakpoints, name=\"pwl\")\n", + "\n", + "demand3 = xr.DataArray([10, 70, 90], coords=[time])\n", + "m3.add_constraints(power + backup >= demand3, name=\"demand\")\n", + "m3.add_objective((2.5 * power + 10 * backup).sum())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "disjunctive-solve", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:34.322383Z", + "start_time": "2026-02-09T19:21:34.260066Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:43.366552Z", + "iopub.status.busy": "2026-02-09T19:21:43.366148Z", + 
"iopub.status.idle": "2026-02-09T19:21:43.457707Z", + "shell.execute_reply": "2026-02-09T19:21:43.457113Z" + } + }, + "outputs": [], + "source": [ + "m3.solve()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "disjunctive-results", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:34.333489Z", + "start_time": "2026-02-09T19:21:34.327107Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:43.459934Z", + "iopub.status.busy": "2026-02-09T19:21:43.459654Z", + "iopub.status.idle": "2026-02-09T19:21:43.468110Z", + "shell.execute_reply": "2026-02-09T19:21:43.465566Z" + } + }, + "outputs": [], + "source": [ + "m3.solution[[\"power\", \"backup\"]].to_pandas()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "g32vxea6jwe", + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-09T19:21:34.545650Z", + "start_time": "2026-02-09T19:21:34.425456Z" + }, + "execution": { + "iopub.execute_input": "2026-02-09T19:21:43.475302Z", + "iopub.status.busy": "2026-02-09T19:21:43.475060Z", + "iopub.status.idle": "2026-02-09T19:21:43.697893Z", + "shell.execute_reply": "2026-02-09T19:21:43.697398Z" + } + }, + "outputs": [], + "source": [ + "plot_pwl_results(m3, breakpoints, demand3, color=\"C2\", fuel_rate=2.5)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/linopy/__init__.py b/linopy/__init__.py index 3efc297a..7f5acd46 100644 --- a/linopy/__init__.py +++ b/linopy/__init__.py @@ -20,6 +20,7 @@ from linopy.io import read_netcdf from linopy.model import Model, Variable, Variables, available_solvers from 
linopy.objective import Objective +from linopy.piecewise import breakpoints from linopy.remote import OetcHandler, RemoteHandler __all__ = ( @@ -37,6 +38,7 @@ "Variable", "Variables", "available_solvers", + "breakpoints", "align", "merge", "options", diff --git a/linopy/common.py b/linopy/common.py index 7dd97b65..0823deac 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -286,6 +286,32 @@ def as_dataarray( return arr +def broadcast_mask(mask: DataArray, labels: DataArray) -> DataArray: + """ + Broadcast a boolean mask to match the shape of labels. + + Ensures that mask dimensions are a subset of labels dimensions, broadcasts + the mask accordingly, and fills any NaN values (from missing coordinates) + with False while emitting a FutureWarning. + """ + assert set(mask.dims).issubset(labels.dims), ( + "Dimensions of mask not a subset of resulting labels dimensions." + ) + mask = mask.broadcast_like(labels) + if mask.isnull().any(): + warn( + "Mask contains coordinates not covered by the data dimensions. " + "Missing values will be filled with False (masked out). " + "In a future version, this will raise an error. " + "Use mask.reindex() or `linopy.align()` to explicitly handle missing " + "coordinates.", + FutureWarning, + stacklevel=3, + ) + mask = mask.fillna(False).astype(bool) + return mask + + # TODO: rename to to_pandas_dataframe def to_dataframe( ds: Dataset, @@ -449,6 +475,25 @@ def group_terms_polars(df: pl.DataFrame) -> pl.DataFrame: return df +def maybe_group_terms_polars(df: pl.DataFrame) -> pl.DataFrame: + """ + Group terms only if there are duplicate (labels, vars) pairs. + + This avoids the expensive group_by operation when terms already + reference distinct variables (e.g. ``x - y`` has ``_term=2`` but + no duplicates). When skipping, columns are reordered to match the + output of ``group_terms_polars``. 
+ """ + varcols = [c for c in df.columns if c.startswith("vars")] + keys = [c for c in ["labels"] + varcols if c in df.columns] + key_count = df.select(pl.struct(keys).n_unique()).item() + if key_count < df.height: + return group_terms_polars(df) + # Match column order of group_terms (group-by keys, coeffs, rest) + rest = [c for c in df.columns if c not in keys and c != "coeffs"] + return df.select(keys + ["coeffs"] + rest) + + def save_join(*dataarrays: DataArray, integer_dtype: bool = False) -> Dataset: """ Join multiple xarray Dataarray's to a Dataset and warn if coordinates are not equal. diff --git a/linopy/constants.py b/linopy/constants.py index 021a9a10..c2467b83 100644 --- a/linopy/constants.py +++ b/linopy/constants.py @@ -35,6 +35,17 @@ TERM_DIM = "_term" STACKED_TERM_DIM = "_stacked_term" + +PWL_LAMBDA_SUFFIX = "_lambda" +PWL_CONVEX_SUFFIX = "_convex" +PWL_LINK_SUFFIX = "_link" +PWL_DELTA_SUFFIX = "_delta" +PWL_FILL_SUFFIX = "_fill" +PWL_BINARY_SUFFIX = "_binary" +PWL_SELECT_SUFFIX = "_select" +DEFAULT_BREAKPOINT_DIM = "breakpoint" +DEFAULT_SEGMENT_DIM = "segment" +DEFAULT_LINK_DIM = "var" GROUPED_TERM_DIM = "_grouped_term" GROUP_DIM = "_group" FACTOR_DIM = "_factor" @@ -49,6 +60,11 @@ CV_DIM, ] +# SOS constraint attribute keys +SOS_TYPE_ATTR = "sos_type" +SOS_DIM_ATTR = "sos_dim" +SOS_BIG_M_ATTR = "big_m_upper" + class ModelStatus(Enum): """ diff --git a/linopy/constraints.py b/linopy/constraints.py index 291beb1d..d3ebef19 100644 --- a/linopy/constraints.py +++ b/linopy/constraints.py @@ -40,10 +40,9 @@ generate_indices_for_printout, get_dims_with_index_levels, get_label_position, - group_terms_polars, has_optimized_model, - infer_schema_polars, iterate_slices, + maybe_group_terms_polars, maybe_replace_signs, print_coord, print_single_constraint, @@ -622,21 +621,38 @@ def to_polars(self) -> pl.DataFrame: long = to_polars(ds[keys]) long = filter_nulls_polars(long) - long = group_terms_polars(long) + if ds.sizes.get("_term", 1) > 1: + long = 
maybe_group_terms_polars(long) check_has_nulls_polars(long, name=f"{self.type} {self.name}") - short_ds = ds[[k for k in ds if "_term" not in ds[k].dims]] - schema = infer_schema_polars(short_ds) - schema["sign"] = pl.Enum(["=", "<=", ">="]) - short = to_polars(short_ds, schema=schema) - short = filter_nulls_polars(short) - check_has_nulls_polars(short, name=f"{self.type} {self.name}") - - df = pl.concat([short, long], how="diagonal_relaxed").sort(["labels", "rhs"]) - # delete subsequent non-null rhs (happens is all vars per label are -1) - is_non_null = df["rhs"].is_not_null() - prev_non_is_null = is_non_null.shift(1).fill_null(False) - df = df.filter(is_non_null & ~prev_non_is_null | ~is_non_null) + # Build short DataFrame (labels, rhs, sign) without xarray broadcast. + # Apply labels mask directly instead of filter_nulls_polars. + labels_flat = ds["labels"].values.reshape(-1) + mask = labels_flat != -1 + labels_masked = labels_flat[mask] + rhs_flat = np.broadcast_to(ds["rhs"].values, ds["labels"].shape).reshape(-1) + + sign_values = ds["sign"].values + sign_flat = np.broadcast_to(sign_values, ds["labels"].shape).reshape(-1) + all_same_sign = len(sign_flat) > 0 and ( + sign_flat[0] == sign_flat[-1] and (sign_flat[0] == sign_flat).all() + ) + + short_data: dict = { + "labels": labels_masked, + "rhs": rhs_flat[mask], + } + if all_same_sign: + short = pl.DataFrame(short_data).with_columns( + pl.lit(sign_flat[0]).cast(pl.Enum(["=", "<=", ">="])).alias("sign") + ) + else: + short_data["sign"] = pl.Series( + "sign", sign_flat[mask], dtype=pl.Enum(["=", "<=", ">="]) + ) + short = pl.DataFrame(short_data) + + df = long.join(short, on="labels", how="inner") return df[["labels", "coeffs", "vars", "sign", "rhs"]] # Wrapped function which would convert variable to dataarray diff --git a/linopy/expressions.py b/linopy/expressions.py index 10e243de..649989f7 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -13,7 +13,7 @@ from collections.abc import Callable, 
Hashable, Iterator, Mapping, Sequence from dataclasses import dataclass, field from itertools import product, zip_longest -from typing import TYPE_CHECKING, Any, TypeVar, overload +from typing import TYPE_CHECKING, Any, TypeVar, cast, overload from warnings import warn import numpy as np @@ -60,6 +60,7 @@ has_optimized_model, is_constant, iterate_slices, + maybe_group_terms_polars, print_coord, print_single_expression, to_dataframe, @@ -507,12 +508,18 @@ def __neg__(self: GenericExpression) -> GenericExpression: def _multiply_by_linear_expression( self, other: LinearExpression | ScalarLinearExpression - ) -> QuadraticExpression: + ) -> LinearExpression | QuadraticExpression: if isinstance(other, ScalarLinearExpression): other = other.to_linexpr() if other.nterm > 1: raise TypeError("Multiplication of multiple terms is not supported.") + + if other.is_constant: + return cast(LinearExpression, self._multiply_by_constant(other.const)) + if self.is_constant: + return cast(LinearExpression, other._multiply_by_constant(self.const)) + # multiplication: (v1 + c1) * (v2 + c2) = v1 * v2 + c1 * v2 + c2 * v1 + c1 * c2 # with v being the variables and c the constants # merge on factor dimension only returns v1 * v2 + c1 * c2 @@ -1463,7 +1470,7 @@ def to_polars(self) -> pl.DataFrame: df = to_polars(self.data) df = filter_nulls_polars(df) - df = group_terms_polars(df) + df = maybe_group_terms_polars(df) check_has_nulls_polars(df, name=self.type) return df diff --git a/linopy/io.py b/linopy/io.py index 56fe033d..54090e87 100644 --- a/linopy/io.py +++ b/linopy/io.py @@ -25,7 +25,7 @@ from linopy import solvers from linopy.common import to_polars -from linopy.constants import CONCAT_DIM +from linopy.constants import CONCAT_DIM, SOS_DIM_ATTR, SOS_TYPE_ATTR from linopy.objective import Objective if TYPE_CHECKING: @@ -54,6 +54,21 @@ def clean_name(name: str) -> str: coord_sanitizer = str.maketrans("[,]", "(,)", " ") +def _format_and_write( + df: pl.DataFrame, columns: list[pl.Expr], f: 
BufferedWriter +) -> None: + """ + Format columns via concat_str and write to file. + + Uses Polars streaming engine for better memory efficiency. + """ + df.lazy().select(pl.concat_str(columns, ignore_nulls=True)).collect( + engine="streaming" + ).write_csv( + f, separator=" ", null_value="", quote_style="never", include_header=False + ) + + def signed_number(expr: pl.Expr) -> tuple[pl.Expr, pl.Expr]: """ Return polars expressions for a signed number string, handling -0.0 correctly. @@ -155,10 +170,7 @@ def objective_write_linear_terms( *signed_number(pl.col("coeffs")), *print_variable(pl.col("vars")), ] - df = df.select(pl.concat_str(cols, ignore_nulls=True)) - df.write_csv( - f, separator=" ", null_value="", quote_style="never", include_header=False - ) + _format_and_write(df, cols, f) def objective_write_quadratic_terms( @@ -171,10 +183,7 @@ def objective_write_quadratic_terms( *print_variable(pl.col("vars2")), ] f.write(b"+ [\n") - df = df.select(pl.concat_str(cols, ignore_nulls=True)) - df.write_csv( - f, separator=" ", null_value="", quote_style="never", include_header=False - ) + _format_and_write(df, cols, f) f.write(b"] / 2\n") @@ -254,11 +263,7 @@ def bounds_to_file( *signed_number(pl.col("upper")), ] - kwargs: Any = dict( - separator=" ", null_value="", quote_style="never", include_header=False - ) - formatted = df.select(pl.concat_str(columns, ignore_nulls=True)) - formatted.write_csv(f, **kwargs) + _format_and_write(df, columns, f) def binaries_to_file( @@ -296,11 +301,7 @@ def binaries_to_file( *print_variable(pl.col("labels")), ] - kwargs: Any = dict( - separator=" ", null_value="", quote_style="never", include_header=False - ) - formatted = df.select(pl.concat_str(columns, ignore_nulls=True)) - formatted.write_csv(f, **kwargs) + _format_and_write(df, columns, f) def integers_to_file( @@ -339,11 +340,7 @@ def integers_to_file( *print_variable(pl.col("labels")), ] - kwargs: Any = dict( - separator=" ", null_value="", quote_style="never", 
include_header=False - ) - formatted = df.select(pl.concat_str(columns, ignore_nulls=True)) - formatted.write_csv(f, **kwargs) + _format_and_write(df, columns, f) def sos_to_file( @@ -374,8 +371,8 @@ def sos_to_file( for name in names: var = m.variables[name] - sos_type = var.attrs["sos_type"] - sos_dim = var.attrs["sos_dim"] + sos_type = var.attrs[SOS_TYPE_ATTR] + sos_dim = var.attrs[SOS_DIM_ATTR] other_dims = [dim for dim in var.labels.dims if dim != sos_dim] for var_slice in var.iterate_slices(slice_size, other_dims): @@ -399,11 +396,7 @@ def sos_to_file( pl.col("var_weights"), ] - kwargs: Any = dict( - separator=" ", null_value="", quote_style="never", include_header=False - ) - formatted = df.select(pl.concat_str(columns, ignore_nulls=True)) - formatted.write_csv(f, **kwargs) + _format_and_write(df, columns, f) def constraints_to_file( @@ -440,58 +433,32 @@ def constraints_to_file( if df.height == 0: continue - # Ensure each constraint has both coefficient and RHS terms - analysis = df.group_by("labels").agg( - [ - pl.col("coeffs").is_not_null().sum().alias("coeff_rows"), - pl.col("sign").is_not_null().sum().alias("rhs_rows"), - ] - ) - - valid = analysis.filter( - (pl.col("coeff_rows") > 0) & (pl.col("rhs_rows") > 0) - ) - - if valid.height == 0: - continue - - # Keep only constraints that have both parts - df = df.join(valid.select("labels"), on="labels", how="inner") - # Sort by labels and mark first/last occurrences df = df.sort("labels").with_columns( [ - pl.when(pl.col("labels").is_first_distinct()) - .then(pl.col("labels")) - .otherwise(pl.lit(None)) - .alias("labels_first"), + pl.col("labels").is_first_distinct().alias("is_first_in_group"), (pl.col("labels") != pl.col("labels").shift(-1)) .fill_null(True) .alias("is_last_in_group"), ] ) - row_labels = print_constraint(pl.col("labels_first")) + row_labels = print_constraint(pl.col("labels")) col_labels = print_variable(pl.col("vars")) columns = [ - 
pl.when(pl.col("labels_first").is_not_null()).then(row_labels[0]), - pl.when(pl.col("labels_first").is_not_null()).then(row_labels[1]), - pl.when(pl.col("labels_first").is_not_null()) - .then(pl.lit(":\n")) - .alias(":"), + pl.when(pl.col("is_first_in_group")).then(row_labels[0]), + pl.when(pl.col("is_first_in_group")).then(row_labels[1]), + pl.when(pl.col("is_first_in_group")).then(pl.lit(":\n")).alias(":"), *signed_number(pl.col("coeffs")), - pl.when(pl.col("vars").is_not_null()).then(col_labels[0]), - pl.when(pl.col("vars").is_not_null()).then(col_labels[1]), + col_labels[0], + col_labels[1], + pl.when(pl.col("is_last_in_group")).then(pl.lit("\n")), pl.when(pl.col("is_last_in_group")).then(pl.col("sign")), pl.when(pl.col("is_last_in_group")).then(pl.lit(" ")), pl.when(pl.col("is_last_in_group")).then(pl.col("rhs").cast(pl.String)), ] - kwargs: Any = dict( - separator=" ", null_value="", quote_style="never", include_header=False - ) - formatted = df.select(pl.concat_str(columns, ignore_nulls=True)) - formatted.write_csv(f, **kwargs) + _format_and_write(df, columns, f) # in the future, we could use lazy dataframes when they support appending # tp existent files @@ -773,8 +740,8 @@ def to_gurobipy( if m.variables.sos: for var_name in m.variables.sos: var = m.variables.sos[var_name] - sos_type: int = var.attrs["sos_type"] # type: ignore[assignment] - sos_dim: str = var.attrs["sos_dim"] # type: ignore[assignment] + sos_type: int = var.attrs[SOS_TYPE_ATTR] # type: ignore[assignment] + sos_dim: str = var.attrs[SOS_DIM_ATTR] # type: ignore[assignment] def add_sos(s: xr.DataArray, sos_type: int, sos_dim: str) -> None: s = s.squeeze() diff --git a/linopy/model.py b/linopy/model.py index 657b2d45..1901a4b9 100644 --- a/linopy/model.py +++ b/linopy/model.py @@ -29,6 +29,7 @@ as_dataarray, assign_multiindex_safe, best_int, + broadcast_mask, maybe_replace_signs, replace_by_map, set_int_index, @@ -38,6 +39,9 @@ GREATER_EQUAL, HELPER_DIMS, LESS_EQUAL, + SOS_BIG_M_ATTR, + 
SOS_DIM_ATTR, + SOS_TYPE_ATTR, TERM_DIM, ModelStatus, TerminationCondition, @@ -59,12 +63,20 @@ ) from linopy.matrices import MatrixAccessor from linopy.objective import Objective +from linopy.piecewise import ( + add_disjunctive_piecewise_constraints, + add_piecewise_constraints, +) from linopy.remote import OetcHandler, RemoteHandler from linopy.solver_capabilities import SolverFeature, solver_supports from linopy.solvers import ( IO_APIS, available_solvers, ) +from linopy.sos_reformulation import ( + reformulate_sos_constraints, + undo_sos_reformulation, +) from linopy.types import ( ConstantLike, ConstraintLike, @@ -108,6 +120,7 @@ class Model: _cCounter: int _varnameCounter: int _connameCounter: int + _pwlCounter: int _blocks: DataArray | None _chunk: T_Chunks _force_dim_names: bool @@ -130,10 +143,12 @@ class Model: "_cCounter", "_varnameCounter", "_connameCounter", + "_pwlCounter", "_blocks", # TODO: check if these should not be mutable "_chunk", "_force_dim_names", + "_auto_mask", "_solver_dir", "solver_model", "solver_name", @@ -145,6 +160,7 @@ def __init__( solver_dir: str | None = None, chunk: T_Chunks = None, force_dim_names: bool = False, + auto_mask: bool = False, ) -> None: """ Initialize the linopy model. @@ -164,6 +180,10 @@ def __init__( "dim_1" and so on. These helps to avoid unintended broadcasting over dimension. Especially the use of pandas DataFrames and Series may become safer. + auto_mask : bool + Whether to automatically mask variables and constraints where + bounds, coefficients, or RHS values contain NaN. The default is + False. 
Returns ------- @@ -180,10 +200,12 @@ def __init__( self._cCounter: int = 0 self._varnameCounter: int = 0 self._connameCounter: int = 0 + self._pwlCounter: int = 0 self._blocks: DataArray | None = None self._chunk: T_Chunks = chunk self._force_dim_names: bool = bool(force_dim_names) + self._auto_mask: bool = bool(auto_mask) self._solver_dir: Path = Path( gettempdir() if solver_dir is None else solver_dir ) @@ -314,6 +336,18 @@ def force_dim_names(self) -> bool: def force_dim_names(self, value: bool) -> None: self._force_dim_names = bool(value) + @property + def auto_mask(self) -> bool: + """ + If True, automatically mask variables and constraints where bounds, + coefficients, or RHS values contain NaN. + """ + return self._auto_mask + + @auto_mask.setter + def auto_mask(self, value: bool) -> None: + self._auto_mask = bool(value) + @property def solver_dir(self) -> Path: """ @@ -340,7 +374,9 @@ def scalar_attrs(self) -> list[str]: "_cCounter", "_varnameCounter", "_connameCounter", + "_pwlCounter", "force_dim_names", + "auto_mask", ] def __repr__(self) -> str: @@ -531,6 +567,20 @@ def add_variables( if mask is not None: mask = as_dataarray(mask, coords=data.coords, dims=data.dims).astype(bool) + mask = broadcast_mask(mask, data.labels) + + # Auto-mask based on NaN in bounds (use numpy for speed) + if self.auto_mask: + auto_mask_values = ~np.isnan(data.lower.values) & ~np.isnan( + data.upper.values + ) + auto_mask_arr = DataArray( + auto_mask_values, coords=data.coords, dims=data.dims + ) + if mask is not None: + mask = mask & auto_mask_arr + else: + mask = auto_mask_arr start = self._xCounter end = start + data.labels.size @@ -538,7 +588,7 @@ def add_variables( self._xCounter += data.labels.size if mask is not None: - data.labels.values = data.labels.where(mask, -1).values + data.labels.values = np.where(mask.values, data.labels.values, -1) data = data.assign_attrs( label_range=(start, end), name=name, binary=binary, integer=integer @@ -556,6 +606,7 @@ def 
add_sos_constraints( variable: Variable, sos_type: Literal[1, 2], sos_dim: str, + big_m: float | None = None, ) -> None: """ Add an sos1 or sos2 constraint for one dimension of a variable @@ -569,15 +620,26 @@ def add_sos_constraints( Type of SOS sos_dim : str Which dimension of variable to add SOS constraint to + big_m : float | None, optional + Big-M value for SOS reformulation. Only used when reformulating + SOS constraints for solvers that don't support them natively. + + - None (default): Use variable upper bounds as Big-M + - float: Custom Big-M value + + The reformulation uses the tighter of big_m and variable upper bound: + M = min(big_m, var.upper). + + Tighter Big-M values improve LP relaxation quality and solve time. """ if sos_type not in (1, 2): raise ValueError(f"sos_type must be 1 or 2, got {sos_type}") if sos_dim not in variable.dims: raise ValueError(f"sos_dim must name a variable dimension, got {sos_dim}") - if "sos_type" in variable.attrs or "sos_dim" in variable.attrs: - existing_sos_type = variable.attrs.get("sos_type") - existing_sos_dim = variable.attrs.get("sos_dim") + if SOS_TYPE_ATTR in variable.attrs or SOS_DIM_ATTR in variable.attrs: + existing_sos_type = variable.attrs.get(SOS_TYPE_ATTR) + existing_sos_dim = variable.attrs.get(SOS_DIM_ATTR) raise ValueError( f"variable already has an sos{existing_sos_type} constraint on {existing_sos_dim}" ) @@ -589,7 +651,16 @@ def add_sos_constraints( f"but got {variable.coords[sos_dim].dtype}" ) - variable.attrs.update(sos_type=sos_type, sos_dim=sos_dim) + attrs_update: dict[str, Any] = {SOS_TYPE_ATTR: sos_type, SOS_DIM_ATTR: sos_dim} + if big_m is not None: + if big_m <= 0: + raise ValueError(f"big_m must be positive, got {big_m}") + attrs_update[SOS_BIG_M_ATTR] = float(big_m) + + variable.attrs.update(attrs_update) + + add_piecewise_constraints = add_piecewise_constraints + add_disjunctive_piecewise_constraints = add_disjunctive_piecewise_constraints def add_constraints( self, @@ -656,6 +727,14 @@ 
def add_constraints( if sign is not None: sign = maybe_replace_signs(as_dataarray(sign)) + # Capture original RHS for auto-masking before constraint creation + # (NaN values in RHS are lost during constraint creation) + # Use numpy for speed instead of xarray's notnull() + original_rhs_mask = None + if self.auto_mask and rhs is not None: + rhs_da = as_dataarray(rhs) + original_rhs_mask = (rhs_da.coords, rhs_da.dims, ~np.isnan(rhs_da.values)) + if isinstance(lhs, LinearExpression): if sign is None or rhs is None: raise ValueError(msg_sign_rhs_not_none) @@ -703,11 +782,29 @@ def add_constraints( (data,) = xr.broadcast(data, exclude=[TERM_DIM]) if mask is not None: - mask = as_dataarray(mask).astype(bool) - # TODO: simplify - assert set(mask.dims).issubset(data.dims), ( - "Dimensions of mask not a subset of resulting labels dimensions." + mask = as_dataarray(mask, coords=data.coords, dims=data.dims).astype(bool) + mask = broadcast_mask(mask, data.labels) + + # Auto-mask based on null expressions or NaN RHS (use numpy for speed) + if self.auto_mask: + # Check if expression is null: all vars == -1 + # Use max() instead of all() - if max == -1, all are -1 (since valid vars >= 0) + # This is ~30% faster for large term dimensions + vars_all_invalid = data.vars.values.max(axis=-1) == -1 + auto_mask_values = ~vars_all_invalid + if original_rhs_mask is not None: + coords, dims, rhs_notnull = original_rhs_mask + if rhs_notnull.shape != auto_mask_values.shape: + rhs_da = DataArray(rhs_notnull, coords=coords, dims=dims) + rhs_notnull = rhs_da.broadcast_like(data.labels).values + auto_mask_values = auto_mask_values & rhs_notnull + auto_mask_arr = DataArray( + auto_mask_values, coords=data.labels.coords, dims=data.labels.dims ) + if mask is not None: + mask = mask & auto_mask_arr + else: + mask = auto_mask_arr self.check_force_dim_names(data) @@ -717,7 +814,7 @@ def add_constraints( self._cCounter += data.labels.size if mask is not None: - data.labels.values = 
data.labels.where(mask, -1).values + data.labels.values = np.where(mask.values, data.labels.values, -1) data = data.assign_attrs(label_range=(start, end), name=name) @@ -830,18 +927,22 @@ def remove_sos_constraints(self, variable: Variable) -> None: ------- None. """ - if "sos_type" not in variable.attrs or "sos_dim" not in variable.attrs: + if SOS_TYPE_ATTR not in variable.attrs or SOS_DIM_ATTR not in variable.attrs: raise ValueError(f"Variable '{variable.name}' has no SOS constraints") - sos_type = variable.attrs["sos_type"] - sos_dim = variable.attrs["sos_dim"] + sos_type = variable.attrs[SOS_TYPE_ATTR] + sos_dim = variable.attrs[SOS_DIM_ATTR] - del variable.attrs["sos_type"], variable.attrs["sos_dim"] + del variable.attrs[SOS_TYPE_ATTR], variable.attrs[SOS_DIM_ATTR] + + variable.attrs.pop(SOS_BIG_M_ATTR, None) logger.debug( f"Removed sos{sos_type} constraint on {sos_dim} from {variable.name}" ) + reformulate_sos_constraints = reformulate_sos_constraints + def remove_objective(self) -> None: """ Remove the objective's linear expression from the model. @@ -1126,6 +1227,7 @@ def solve( remote: RemoteHandler | OetcHandler = None, # type: ignore progress: bool | None = None, mock_solve: bool = False, + reformulate_sos: bool = False, **solver_options: Any, ) -> tuple[str, str]: """ @@ -1195,6 +1297,11 @@ def solve( than 10000 variables and constraints. mock_solve : bool, optional Whether to run a mock solve. This will skip the actual solving. Variables will be set to have dummy values + reformulate_sos : bool, optional + Whether to automatically reformulate SOS constraints as binary + linear + constraints for solvers that don't support them natively. + This uses the Big-M method and requires all SOS variables to have finite bounds. + Default is False. **solver_options : kwargs Options passed to the solver. @@ -1292,11 +1399,25 @@ def solve( f"Solver {solver_name} does not support quadratic problems." 
) - # SOS constraints are not supported by all solvers - if self.variables.sos and not solver_supports( - solver_name, SolverFeature.SOS_CONSTRAINTS - ): - raise ValueError(f"Solver {solver_name} does not support SOS constraints.") + sos_reform_result = None + if self.variables.sos: + if reformulate_sos and not solver_supports( + solver_name, SolverFeature.SOS_CONSTRAINTS + ): + logger.info(f"Reformulating SOS constraints for solver {solver_name}") + sos_reform_result = reformulate_sos_constraints(self) + elif reformulate_sos and solver_supports( + solver_name, SolverFeature.SOS_CONSTRAINTS + ): + logger.warning( + f"Solver {solver_name} supports SOS natively; " + "reformulate_sos=True is ignored." + ) + elif not solver_supports(solver_name, SolverFeature.SOS_CONSTRAINTS): + raise ValueError( + f"Solver {solver_name} does not support SOS constraints. " + "Use reformulate_sos=True or a solver that supports SOS (gurobi, cplex)." + ) try: solver_class = getattr(solvers, f"{solvers.SolverName(solver_name).name}") @@ -1345,44 +1466,51 @@ def solve( if fn is not None and (os.path.exists(fn) and not keep_files): os.remove(fn) - result.info() - - self.objective._value = result.solution.objective - self.status = result.status.status.value - self.termination_condition = result.status.termination_condition.value - self.solver_model = result.solver_model - self.solver_name = solver_name - - if not result.status.is_ok: - return result.status.status.value, result.status.termination_condition.value + try: + result.info() + + self.objective._value = result.solution.objective + self.status = result.status.status.value + self.termination_condition = result.status.termination_condition.value + self.solver_model = result.solver_model + self.solver_name = solver_name + + if not result.status.is_ok: + return ( + result.status.status.value, + result.status.termination_condition.value, + ) - # map solution and dual to original shape which includes missing values - sol = 
result.solution.primal.copy() - sol = set_int_index(sol) - sol.loc[-1] = nan + # map solution and dual to original shape which includes missing values + sol = result.solution.primal.copy() + sol = set_int_index(sol) + sol.loc[-1] = nan - for name, var in self.variables.items(): - idx = np.ravel(var.labels) - try: - vals = sol[idx].values.reshape(var.labels.shape) - except KeyError: - vals = sol.reindex(idx).values.reshape(var.labels.shape) - var.solution = xr.DataArray(vals, var.coords) - - if not result.solution.dual.empty: - dual = result.solution.dual.copy() - dual = set_int_index(dual) - dual.loc[-1] = nan - - for name, con in self.constraints.items(): - idx = np.ravel(con.labels) + for name, var in self.variables.items(): + idx = np.ravel(var.labels) try: - vals = dual[idx].values.reshape(con.labels.shape) + vals = sol[idx].values.reshape(var.labels.shape) except KeyError: - vals = dual.reindex(idx).values.reshape(con.labels.shape) - con.dual = xr.DataArray(vals, con.labels.coords) + vals = sol.reindex(idx).values.reshape(var.labels.shape) + var.solution = xr.DataArray(vals, var.coords) + + if not result.solution.dual.empty: + dual = result.solution.dual.copy() + dual = set_int_index(dual) + dual.loc[-1] = nan + + for name, con in self.constraints.items(): + idx = np.ravel(con.labels) + try: + vals = dual[idx].values.reshape(con.labels.shape) + except KeyError: + vals = dual.reindex(idx).values.reshape(con.labels.shape) + con.dual = xr.DataArray(vals, con.labels.coords) - return result.status.status.value, result.status.termination_condition.value + return result.status.status.value, result.status.termination_condition.value + finally: + if sos_reform_result is not None: + undo_sos_reformulation(self, sos_reform_result) def _mock_solve( self, diff --git a/linopy/piecewise.py b/linopy/piecewise.py new file mode 100644 index 00000000..fd42bcc0 --- /dev/null +++ b/linopy/piecewise.py @@ -0,0 +1,899 @@ +""" +Piecewise linear constraint formulations. 
+ +Provides SOS2, incremental, and disjunctive piecewise linear constraint +methods for use with linopy.Model. +""" + +from __future__ import annotations + +from collections.abc import Mapping +from typing import TYPE_CHECKING, Literal + +import numpy as np +import pandas as pd +import xarray as xr +from xarray import DataArray + +from linopy.constants import ( + DEFAULT_BREAKPOINT_DIM, + DEFAULT_LINK_DIM, + DEFAULT_SEGMENT_DIM, + HELPER_DIMS, + PWL_BINARY_SUFFIX, + PWL_CONVEX_SUFFIX, + PWL_DELTA_SUFFIX, + PWL_FILL_SUFFIX, + PWL_LAMBDA_SUFFIX, + PWL_LINK_SUFFIX, + PWL_SELECT_SUFFIX, +) + +if TYPE_CHECKING: + from linopy.constraints import Constraint + from linopy.expressions import LinearExpression + from linopy.model import Model + from linopy.types import LinExprLike + + +def _list_to_array(values: list[float], bp_dim: str) -> DataArray: + arr = np.asarray(values, dtype=float) + if arr.ndim != 1: + raise ValueError(f"Expected a 1D list of numeric values, got shape {arr.shape}") + return DataArray(arr, dims=[bp_dim], coords={bp_dim: np.arange(len(arr))}) + + +def _dict_to_array(d: dict[str, list[float]], dim: str, bp_dim: str) -> DataArray: + max_len = max(len(v) for v in d.values()) + keys = list(d.keys()) + data = np.full((len(keys), max_len), np.nan) + for i, k in enumerate(keys): + vals = d[k] + data[i, : len(vals)] = vals + return DataArray( + data, + dims=[dim, bp_dim], + coords={dim: keys, bp_dim: np.arange(max_len)}, + ) + + +def _segments_list_to_array( + values: list[list[float]], bp_dim: str, seg_dim: str +) -> DataArray: + max_len = max(len(seg) for seg in values) + data = np.full((len(values), max_len), np.nan) + for i, seg in enumerate(values): + data[i, : len(seg)] = seg + return DataArray( + data, + dims=[seg_dim, bp_dim], + coords={seg_dim: np.arange(len(values)), bp_dim: np.arange(max_len)}, + ) + + +def _dict_segments_to_array( + d: dict[str, list[list[float]]], dim: str, bp_dim: str, seg_dim: str +) -> DataArray: + parts = [] + for key, 
seg_list in d.items(): + arr = _segments_list_to_array(seg_list, bp_dim, seg_dim) + parts.append(arr.expand_dims({dim: [key]})) + combined = xr.concat(parts, dim=dim) + max_bp = max(max(len(seg) for seg in sl) for sl in d.values()) + max_seg = max(len(sl) for sl in d.values()) + if combined.sizes[bp_dim] < max_bp or combined.sizes[seg_dim] < max_seg: + combined = combined.reindex( + {bp_dim: np.arange(max_bp), seg_dim: np.arange(max_seg)}, + fill_value=np.nan, + ) + return combined + + +def _get_entity_keys( + kwargs: Mapping[str, object], +) -> list[str]: + first_dict = next(v for v in kwargs.values() if isinstance(v, dict)) + return list(first_dict.keys()) + + +def _validate_factory_args( + values: list | dict | None, + kwargs: dict, +) -> None: + if values is not None and kwargs: + raise ValueError("Cannot pass both positional 'values' and keyword arguments") + if values is None and not kwargs: + raise ValueError("Must pass either positional 'values' or keyword arguments") + + +def _resolve_kwargs( + kwargs: dict[str, list[float] | dict[str, list[float]] | DataArray], + dim: str | None, + bp_dim: str, + link_dim: str, +) -> DataArray: + has_dict = any(isinstance(v, dict) for v in kwargs.values()) + if has_dict and dim is None: + raise ValueError("'dim' is required when any kwarg value is a dict") + + arrays: dict[str, DataArray] = {} + for name, val in kwargs.items(): + if isinstance(val, DataArray): + arrays[name] = val + elif isinstance(val, dict): + assert dim is not None + arrays[name] = _dict_to_array(val, dim, bp_dim) + elif isinstance(val, list): + base = _list_to_array(val, bp_dim) + if has_dict: + base = base.expand_dims({dim: _get_entity_keys(kwargs)}) + arrays[name] = base + else: + raise ValueError( + f"kwarg '{name}' must be a list, dict, or DataArray, got {type(val)}" + ) + + parts = [arr.expand_dims({link_dim: [name]}) for name, arr in arrays.items()] + return xr.concat(parts, dim=link_dim) + + +def _resolve_segment_kwargs( + kwargs: dict[str, 
list[list[float]] | dict[str, list[list[float]]] | DataArray], + dim: str | None, + bp_dim: str, + seg_dim: str, + link_dim: str, +) -> DataArray: + has_dict = any(isinstance(v, dict) for v in kwargs.values()) + if has_dict and dim is None: + raise ValueError("'dim' is required when any kwarg value is a dict") + + arrays: dict[str, DataArray] = {} + for name, val in kwargs.items(): + if isinstance(val, DataArray): + arrays[name] = val + elif isinstance(val, dict): + assert dim is not None + arrays[name] = _dict_segments_to_array(val, dim, bp_dim, seg_dim) + elif isinstance(val, list): + base = _segments_list_to_array(val, bp_dim, seg_dim) + if has_dict: + base = base.expand_dims({dim: _get_entity_keys(kwargs)}) + arrays[name] = base + else: + raise ValueError( + f"kwarg '{name}' must be a list, dict, or DataArray, got {type(val)}" + ) + + parts = [arr.expand_dims({link_dim: [name]}) for name, arr in arrays.items()] + combined = xr.concat(parts, dim=link_dim) + max_bp = max(a.sizes.get(bp_dim, 0) for a in arrays.values()) + max_seg = max(a.sizes.get(seg_dim, 0) for a in arrays.values()) + if ( + combined.sizes.get(bp_dim, 0) < max_bp + or combined.sizes.get(seg_dim, 0) < max_seg + ): + combined = combined.reindex( + {bp_dim: np.arange(max_bp), seg_dim: np.arange(max_seg)}, + fill_value=np.nan, + ) + return combined + + +class _BreakpointFactory: + """ + Factory for creating breakpoint DataArrays for piecewise linear constraints. + + Use ``linopy.breakpoints(...)`` for continuous breakpoints and + ``linopy.breakpoints.segments(...)`` for disjunctive (disconnected) segments. + """ + + def __call__( + self, + values: list[float] | dict[str, list[float]] | None = None, + *, + dim: str | None = None, + bp_dim: str = DEFAULT_BREAKPOINT_DIM, + link_dim: str = DEFAULT_LINK_DIM, + **kwargs: list[float] | dict[str, list[float]] | DataArray, + ) -> DataArray: + """ + Create a breakpoint DataArray for piecewise linear constraints. 
+ + Parameters + ---------- + values : list or dict, optional + Breakpoint values. A list creates 1D breakpoints. A dict creates + per-entity breakpoints (requires ``dim``). Cannot be used with kwargs. + dim : str, optional + Entity dimension name. Required when ``values`` is a dict. + bp_dim : str, default "breakpoint" + Name for the breakpoint dimension. + link_dim : str, default "var" + Name for the link dimension when using kwargs. + **kwargs : list, dict, or DataArray + Per-variable breakpoints. Each kwarg becomes a coordinate on the + link dimension. + + Returns + ------- + DataArray + Breakpoint array with appropriate dimensions and coordinates. + """ + _validate_factory_args(values, kwargs) + + if values is not None: + if isinstance(values, list): + return _list_to_array(values, bp_dim) + if isinstance(values, dict): + if dim is None: + raise ValueError("'dim' is required when 'values' is a dict") + return _dict_to_array(values, dim, bp_dim) + raise TypeError(f"'values' must be a list or dict, got {type(values)}") + + return _resolve_kwargs(kwargs, dim, bp_dim, link_dim) + + def segments( + self, + values: list[list[float]] | dict[str, list[list[float]]] | None = None, + *, + dim: str | None = None, + bp_dim: str = DEFAULT_BREAKPOINT_DIM, + seg_dim: str = DEFAULT_SEGMENT_DIM, + link_dim: str = DEFAULT_LINK_DIM, + **kwargs: list[list[float]] | dict[str, list[list[float]]] | DataArray, + ) -> DataArray: + """ + Create a segmented breakpoint DataArray for disjunctive piecewise constraints. + + Parameters + ---------- + values : list or dict, optional + Segment breakpoints. A list of lists creates 2D breakpoints + ``[segment, breakpoint]``. A dict creates per-entity segments + (requires ``dim``). Cannot be used with kwargs. + dim : str, optional + Entity dimension name. Required when ``values`` is a dict. + bp_dim : str, default "breakpoint" + Name for the breakpoint dimension. + seg_dim : str, default "segment" + Name for the segment dimension. 
+ link_dim : str, default "var" + Name for the link dimension when using kwargs. + **kwargs : list, dict, or DataArray + Per-variable segment breakpoints. + + Returns + ------- + DataArray + Breakpoint array with segment and breakpoint dimensions. + """ + _validate_factory_args(values, kwargs) + + if values is not None: + if isinstance(values, list): + return _segments_list_to_array(values, bp_dim, seg_dim) + if isinstance(values, dict): + if dim is None: + raise ValueError("'dim' is required when 'values' is a dict") + return _dict_segments_to_array(values, dim, bp_dim, seg_dim) + raise TypeError(f"'values' must be a list or dict, got {type(values)}") + + return _resolve_segment_kwargs(kwargs, dim, bp_dim, seg_dim, link_dim) + + +breakpoints = _BreakpointFactory() + + +def _auto_broadcast_breakpoints( + bp: DataArray, + expr: LinExprLike | dict[str, LinExprLike], + dim: str, + link_dim: str | None = None, + exclude_dims: set[str] | None = None, +) -> DataArray: + _, target_dims = _validate_piecewise_expr(expr) + + skip = {dim} | set(HELPER_DIMS) + if link_dim is not None: + skip.add(link_dim) + if exclude_dims is not None: + skip.update(exclude_dims) + + target_dims -= skip + missing = target_dims - {str(d) for d in bp.dims} + + if not missing: + return bp + + expand_map: dict[str, list] = {} + all_exprs = expr.values() if isinstance(expr, dict) else [expr] + for d in missing: + for e in all_exprs: + if d in e.coords: + expand_map[str(d)] = list(e.coords[d].values) + break + + if expand_map: + bp = bp.expand_dims(expand_map) + + return bp + + +def _extra_coords(breakpoints: DataArray, *exclude_dims: str | None) -> list[pd.Index]: + excluded = {d for d in exclude_dims if d is not None} + return [ + pd.Index(breakpoints.coords[d].values, name=d) + for d in breakpoints.dims + if d not in excluded + ] + + +def _validate_breakpoints(breakpoints: DataArray, dim: str) -> None: + if dim not in breakpoints.dims: + raise ValueError( + f"breakpoints must have dimension 
'{dim}', " + f"but only has dimensions {list(breakpoints.dims)}" + ) + + +def _validate_numeric_breakpoint_coords(breakpoints: DataArray, dim: str) -> None: + if not pd.api.types.is_numeric_dtype(breakpoints.coords[dim]): + raise ValueError( + f"Breakpoint dimension '{dim}' must have numeric coordinates " + f"for SOS2 weights, but got {breakpoints.coords[dim].dtype}" + ) + + +def _check_strict_monotonicity(breakpoints: DataArray, dim: str) -> bool: + """ + Check if breakpoints are strictly monotonic along dim. + + Each slice along non-dim dimensions is checked independently, + allowing different slices to have opposite directions (e.g., one + increasing and another decreasing). NaN values are ignored. + """ + diffs = breakpoints.diff(dim) + pos = (diffs > 0) | diffs.isnull() + neg = (diffs < 0) | diffs.isnull() + all_pos_per_slice = pos.all(dim) + all_neg_per_slice = neg.all(dim) + has_non_nan = (~diffs.isnull()).any(dim) + monotonic = (all_pos_per_slice | all_neg_per_slice) & has_non_nan + return bool(monotonic.all()) + + +def _has_trailing_nan_only(breakpoints: DataArray, dim: str) -> bool: + """Check that NaN values in breakpoints only appear as trailing entries along dim.""" + valid = ~breakpoints.isnull() + cummin = np.minimum.accumulate(valid.values, axis=valid.dims.index(dim)) + cummin_da = DataArray(cummin, coords=valid.coords, dims=valid.dims) + return not bool((valid & ~cummin_da).any()) + + +def _to_linexpr(expr: LinExprLike) -> LinearExpression: + from linopy.expressions import LinearExpression + + if isinstance(expr, LinearExpression): + return expr + return expr.to_linexpr() + + +def _validate_piecewise_expr( + expr: LinExprLike | dict[str, LinExprLike], +) -> tuple[bool, set[str]]: + from linopy.expressions import LinearExpression + from linopy.variables import Variable + + _types = (Variable, LinearExpression) + + if isinstance(expr, _types): + return True, {str(d) for d in expr.coord_dims} + + if isinstance(expr, dict): + dims: set[str] = set() + 
for key, val in expr.items(): + if not isinstance(val, _types): + raise TypeError( + f"dict value for key '{key}' must be a Variable or " + f"LinearExpression, got {type(val)}" + ) + dims.update(str(d) for d in val.coord_dims) + return False, dims + + raise TypeError( + f"'expr' must be a Variable, LinearExpression, or dict of these, " + f"got {type(expr)}" + ) + + +def _compute_mask( + mask: DataArray | None, + breakpoints: DataArray, + skip_nan_check: bool, +) -> DataArray | None: + if mask is not None: + return mask + if skip_nan_check: + return None + return ~breakpoints.isnull() + + +def _resolve_link_dim( + breakpoints: DataArray, + expr_keys: set[str], + exclude_dims: set[str], +) -> str: + for d in breakpoints.dims: + if d in exclude_dims: + continue + coord_set = {str(c) for c in breakpoints.coords[d].values} + if coord_set == expr_keys: + return str(d) + raise ValueError( + "Could not auto-detect linking dimension from breakpoints. " + "Ensure breakpoints have a dimension whose coordinates match " + f"the expression dict keys. 
" + f"Breakpoint dimensions: {list(breakpoints.dims)}, " + f"expression keys: {list(expr_keys)}" + ) + + +def _build_stacked_expr( + model: Model, + expr_dict: dict[str, LinExprLike], + breakpoints: DataArray, + link_dim: str, +) -> LinearExpression: + from linopy.expressions import LinearExpression + + link_coords = list(breakpoints.coords[link_dim].values) + + expr_data_list = [] + for k in link_coords: + e = expr_dict[str(k)] + linexpr = _to_linexpr(e) + expr_data_list.append(linexpr.data.expand_dims({link_dim: [k]})) + + stacked_data = xr.concat(expr_data_list, dim=link_dim) + return LinearExpression(stacked_data, model) + + +def _resolve_expr( + model: Model, + expr: LinExprLike | dict[str, LinExprLike], + breakpoints: DataArray, + dim: str, + mask: DataArray | None, + skip_nan_check: bool, + exclude_dims: set[str] | None = None, +) -> tuple[LinearExpression, str | None, DataArray | None, DataArray | None]: + is_single, _ = _validate_piecewise_expr(expr) + + computed_mask = _compute_mask(mask, breakpoints, skip_nan_check) + + if is_single: + target_expr = _to_linexpr(expr) # type: ignore[arg-type] + return target_expr, None, computed_mask, computed_mask + + expr_dict: dict[str, LinExprLike] = expr # type: ignore[assignment] + expr_keys = set(expr_dict.keys()) + all_exclude = {dim} | (exclude_dims or set()) + resolved_link_dim = _resolve_link_dim(breakpoints, expr_keys, all_exclude) + lambda_mask = None + if computed_mask is not None: + if resolved_link_dim not in computed_mask.dims: + computed_mask = computed_mask.broadcast_like(breakpoints) + lambda_mask = computed_mask.any(dim=resolved_link_dim) + target_expr = _build_stacked_expr(model, expr_dict, breakpoints, resolved_link_dim) + return target_expr, resolved_link_dim, computed_mask, lambda_mask + + +def _add_pwl_sos2( + model: Model, + name: str, + breakpoints: DataArray, + dim: str, + target_expr: LinearExpression, + lambda_coords: list[pd.Index], + lambda_mask: DataArray | None, +) -> Constraint: + 
lambda_name = f"{name}{PWL_LAMBDA_SUFFIX}" + convex_name = f"{name}{PWL_CONVEX_SUFFIX}" + link_name = f"{name}{PWL_LINK_SUFFIX}" + + lambda_var = model.add_variables( + lower=0, upper=1, coords=lambda_coords, name=lambda_name, mask=lambda_mask + ) + + model.add_sos_constraints(lambda_var, sos_type=2, sos_dim=dim) + + convex_con = model.add_constraints(lambda_var.sum(dim=dim) == 1, name=convex_name) + + weighted_sum = (lambda_var * breakpoints).sum(dim=dim) + model.add_constraints(target_expr == weighted_sum, name=link_name) + + return convex_con + + +def _add_pwl_incremental( + model: Model, + name: str, + breakpoints: DataArray, + dim: str, + target_expr: LinearExpression, + extra_coords: list[pd.Index], + breakpoint_mask: DataArray | None, + link_dim: str | None, +) -> Constraint: + delta_name = f"{name}{PWL_DELTA_SUFFIX}" + fill_name = f"{name}{PWL_FILL_SUFFIX}" + link_name = f"{name}{PWL_LINK_SUFFIX}" + + n_segments = breakpoints.sizes[dim] - 1 + seg_dim = f"{dim}_seg" + seg_index = pd.Index(range(n_segments), name=seg_dim) + delta_coords = extra_coords + [seg_index] + + steps = breakpoints.diff(dim).rename({dim: seg_dim}) + steps[seg_dim] = seg_index + + if breakpoint_mask is not None: + bp_mask = breakpoint_mask + if link_dim is not None: + bp_mask = bp_mask.all(dim=link_dim) + mask_lo = bp_mask.isel({dim: slice(None, -1)}).rename({dim: seg_dim}) + mask_hi = bp_mask.isel({dim: slice(1, None)}).rename({dim: seg_dim}) + mask_lo[seg_dim] = seg_index + mask_hi[seg_dim] = seg_index + delta_mask: DataArray | None = mask_lo & mask_hi + else: + delta_mask = None + + delta_var = model.add_variables( + lower=0, upper=1, coords=delta_coords, name=delta_name, mask=delta_mask + ) + + fill_con: Constraint | None = None + if n_segments >= 2: + delta_lo = delta_var.isel({seg_dim: slice(None, -1)}, drop=True) + delta_hi = delta_var.isel({seg_dim: slice(1, None)}, drop=True) + fill_con = model.add_constraints(delta_hi <= delta_lo, name=fill_name) + + bp0 = 
breakpoints.isel({dim: 0}) + weighted_sum = (delta_var * steps).sum(dim=seg_dim) + bp0 + link_con = model.add_constraints(target_expr == weighted_sum, name=link_name) + + return fill_con if fill_con is not None else link_con + + +def _add_dpwl_sos2( + model: Model, + name: str, + breakpoints: DataArray, + dim: str, + segment_dim: str, + target_expr: LinearExpression, + lambda_coords: list[pd.Index], + lambda_mask: DataArray | None, + binary_coords: list[pd.Index], + binary_mask: DataArray | None, +) -> Constraint: + binary_name = f"{name}{PWL_BINARY_SUFFIX}" + select_name = f"{name}{PWL_SELECT_SUFFIX}" + lambda_name = f"{name}{PWL_LAMBDA_SUFFIX}" + convex_name = f"{name}{PWL_CONVEX_SUFFIX}" + link_name = f"{name}{PWL_LINK_SUFFIX}" + + binary_var = model.add_variables( + binary=True, coords=binary_coords, name=binary_name, mask=binary_mask + ) + + select_con = model.add_constraints( + binary_var.sum(dim=segment_dim) == 1, name=select_name + ) + + lambda_var = model.add_variables( + lower=0, upper=1, coords=lambda_coords, name=lambda_name, mask=lambda_mask + ) + + model.add_sos_constraints(lambda_var, sos_type=2, sos_dim=dim) + + model.add_constraints(lambda_var.sum(dim=dim) == binary_var, name=convex_name) + + weighted_sum = (lambda_var * breakpoints).sum(dim=[segment_dim, dim]) + model.add_constraints(target_expr == weighted_sum, name=link_name) + + return select_con + + +def add_piecewise_constraints( + model: Model, + expr: LinExprLike | dict[str, LinExprLike], + breakpoints: DataArray, + dim: str = DEFAULT_BREAKPOINT_DIM, + mask: DataArray | None = None, + name: str | None = None, + skip_nan_check: bool = False, + method: Literal["sos2", "incremental", "auto"] = "sos2", +) -> Constraint: + """ + Add a piecewise linear constraint using SOS2 or incremental formulation. + + This method creates a piecewise linear constraint that links one or more + variables/expressions together via a set of breakpoints. 
It supports two + formulations: + + - **SOS2** (default): Uses SOS2 (Special Ordered Set of type 2) with lambda + (interpolation) variables. Works for any breakpoints. + - **Incremental**: Uses delta variables with filling-order constraints. + Pure LP formulation (no SOS2 or binary variables), but requires strictly + monotonic breakpoints. + + Parameters + ---------- + model : Model + The linopy model to add the constraint to. + expr : Variable, LinearExpression, or dict of these + The variable(s) or expression(s) to be linked by the piecewise constraint. + - If a single Variable/LinearExpression is passed, the breakpoints + directly specify the piecewise points for that expression. + - If a dict is passed, the keys must match coordinates of a dimension + of the breakpoints, allowing multiple expressions to be linked. + breakpoints : xr.DataArray + The breakpoint values defining the piecewise linear function. + Must have `dim` as one of its dimensions. If `expr` is a dict, + must also have a dimension with coordinates matching the dict keys. + dim : str, default "breakpoint" + The dimension in breakpoints that represents the breakpoint index. + This dimension's coordinates must be numeric (used as SOS2 weights + for the SOS2 method). + mask : xr.DataArray, optional + Boolean mask indicating which piecewise constraints are valid. + If None, auto-detected from NaN values in breakpoints (unless + skip_nan_check is True). + name : str, optional + Base name for the generated variables and constraints. + If None, auto-generates names like "pwl0", "pwl1", etc. + skip_nan_check : bool, default False + If True, skip automatic NaN detection in breakpoints. Use this + when you know breakpoints contain no NaN values for better performance. + method : Literal["sos2", "incremental", "auto"], default "sos2" + Formulation method. One of: + - ``"sos2"``: SOS2 formulation with lambda variables (default). + - ``"incremental"``: Incremental (delta) formulation. 
Requires strictly + monotonic breakpoints. Pure LP, no SOS2 or binary variables. + - ``"auto"``: Automatically selects ``"incremental"`` if breakpoints are + strictly monotonic, otherwise falls back to ``"sos2"``. + + Returns + ------- + Constraint + For SOS2: the convexity constraint (sum of lambda = 1). + For incremental: the filling-order constraint (or the link + constraint if only 2 breakpoints). + + Raises + ------ + ValueError + If expr is not a Variable, LinearExpression, or dict of these. + If breakpoints doesn't have the required dim dimension. + If the linking dimension cannot be auto-detected when expr is a dict. + If dim coordinates are not numeric (SOS2 method only). + If breakpoints are not strictly monotonic (incremental method). + If method is not one of 'sos2', 'incremental', 'auto'. + + Examples + -------- + Single variable piecewise constraint: + + >>> from linopy import Model + >>> import xarray as xr + >>> m = Model() + >>> x = m.add_variables(name="x") + >>> breakpoints = xr.DataArray([0, 10, 50, 100], dims=["bp"]) + >>> _ = m.add_piecewise_constraints(x, breakpoints, dim="bp") + + Notes + ----- + **SOS2 formulation:** + + 1. Lambda variables λ_i with bounds [0, 1] are created for each breakpoint + 2. SOS2 constraint ensures at most two adjacent λ_i can be non-zero + 3. Convexity constraint: Σ λ_i = 1 + 4. Linking constraints: expr = Σ λ_i × breakpoint_i (for each expression) + + **Incremental formulation** (for strictly monotonic breakpoints bp₀ < bp₁ < ... < bpₙ): + + 1. Delta variables δᵢ ∈ [0, 1] for i = 1, ..., n (one per segment) + 2. Filling-order constraints: δᵢ₊₁ ≤ δᵢ for i = 1, ..., n-1 + 3. 
Linking constraint: expr = bp₀ + Σᵢ δᵢ × (bpᵢ - bpᵢ₋₁) + """ + if method not in ("sos2", "incremental", "auto"): + raise ValueError( + f"method must be 'sos2', 'incremental', or 'auto', got '{method}'" + ) + + _validate_breakpoints(breakpoints, dim) + breakpoints = _auto_broadcast_breakpoints(breakpoints, expr, dim) + + if method in ("incremental", "auto"): + is_monotonic = _check_strict_monotonicity(breakpoints, dim) + trailing_nan_only = _has_trailing_nan_only(breakpoints, dim) + if method == "auto": + if is_monotonic and trailing_nan_only: + method = "incremental" + else: + method = "sos2" + elif not is_monotonic: + raise ValueError( + "Incremental method requires strictly monotonic breakpoints " + "along the breakpoint dimension." + ) + if method == "incremental" and not trailing_nan_only: + raise ValueError( + "Incremental method does not support non-trailing NaN breakpoints. " + "NaN values must only appear at the end of the breakpoint sequence. " + "Use method='sos2' for breakpoints with gaps." 
+ ) + + if method == "sos2": + _validate_numeric_breakpoint_coords(breakpoints, dim) + + if name is None: + name = f"pwl{model._pwlCounter}" + model._pwlCounter += 1 + + target_expr, resolved_link_dim, computed_mask, lambda_mask = _resolve_expr( + model, expr, breakpoints, dim, mask, skip_nan_check + ) + + extra_coords = _extra_coords(breakpoints, dim, resolved_link_dim) + lambda_coords = extra_coords + [pd.Index(breakpoints.coords[dim].values, name=dim)] + + if method == "sos2": + return _add_pwl_sos2( + model, name, breakpoints, dim, target_expr, lambda_coords, lambda_mask + ) + else: + return _add_pwl_incremental( + model, + name, + breakpoints, + dim, + target_expr, + extra_coords, + computed_mask, + resolved_link_dim, + ) + + +def add_disjunctive_piecewise_constraints( + model: Model, + expr: LinExprLike | dict[str, LinExprLike], + breakpoints: DataArray, + dim: str = DEFAULT_BREAKPOINT_DIM, + segment_dim: str = DEFAULT_SEGMENT_DIM, + mask: DataArray | None = None, + name: str | None = None, + skip_nan_check: bool = False, +) -> Constraint: + """ + Add a disjunctive piecewise linear constraint for disconnected segments. + + Unlike ``add_piecewise_constraints``, which models continuous piecewise + linear functions (all segments connected end-to-end), this method handles + **disconnected segments** (with gaps between them). The variable must lie + on exactly one segment, selected by binary indicator variables. + + Uses the disaggregated convex combination formulation (no big-M needed, + tight LP relaxation): + + 1. Binary ``y_k ∈ {0,1}`` per segment, ``Σ y_k = 1`` + 2. Lambda ``λ_{k,i} ∈ [0,1]`` per breakpoint in each segment + 3. Convexity: ``Σ_i λ_{k,i} = y_k`` + 4. SOS2 within each segment (along breakpoint dim) + 5. Linking: ``expr = Σ_k Σ_i λ_{k,i} × bp_{k,i}`` + + Parameters + ---------- + model : Model + The linopy model to add the constraint to. 
+ expr : Variable, LinearExpression, or dict of these + The variable(s) or expression(s) to be linked by the piecewise + constraint. + breakpoints : xr.DataArray + Breakpoint values with at least ``dim`` and ``segment_dim`` + dimensions. Each slice along ``segment_dim`` defines one segment. + Use NaN to pad segments with fewer breakpoints. + dim : str, default "breakpoint" + Dimension for breakpoint indices within each segment. + Must have numeric coordinates. + segment_dim : str, default "segment" + Dimension indexing the segments. + mask : xr.DataArray, optional + Boolean mask. If None, auto-detected from NaN values. + name : str, optional + Base name for generated variables/constraints. Auto-generated + if None using the shared ``_pwlCounter``. + skip_nan_check : bool, default False + If True, skip NaN detection in breakpoints. + + Returns + ------- + Constraint + The selection constraint (``Σ y_k = 1``). + + Raises + ------ + ValueError + If ``dim`` or ``segment_dim`` not in breakpoints dimensions. + If ``dim == segment_dim``. + If ``dim`` coordinates are not numeric. + TypeError + If ``expr`` is not a Variable, LinearExpression, or dict. + + Examples + -------- + Two disconnected segments [0,10] and [50,100]: + + >>> from linopy import Model + >>> import xarray as xr + >>> m = Model() + >>> x = m.add_variables(name="x") + >>> breakpoints = xr.DataArray( + ... [[0, 10], [50, 100]], + ... dims=["segment", "breakpoint"], + ... coords={"segment": [0, 1], "breakpoint": [0, 1]}, + ... 
) + >>> _ = m.add_disjunctive_piecewise_constraints(x, breakpoints) + """ + _validate_breakpoints(breakpoints, dim) + if segment_dim not in breakpoints.dims: + raise ValueError( + f"breakpoints must have dimension '{segment_dim}', " + f"but only has dimensions {list(breakpoints.dims)}" + ) + if dim == segment_dim: + raise ValueError(f"dim and segment_dim must be different, both are '{dim}'") + _validate_numeric_breakpoint_coords(breakpoints, dim) + breakpoints = _auto_broadcast_breakpoints( + breakpoints, expr, dim, exclude_dims={segment_dim} + ) + + if name is None: + name = f"pwl{model._pwlCounter}" + model._pwlCounter += 1 + + target_expr, resolved_link_dim, computed_mask, lambda_mask = _resolve_expr( + model, + expr, + breakpoints, + dim, + mask, + skip_nan_check, + exclude_dims={segment_dim}, + ) + + extra_coords = _extra_coords(breakpoints, dim, segment_dim, resolved_link_dim) + lambda_coords = extra_coords + [ + pd.Index(breakpoints.coords[segment_dim].values, name=segment_dim), + pd.Index(breakpoints.coords[dim].values, name=dim), + ] + binary_coords = extra_coords + [ + pd.Index(breakpoints.coords[segment_dim].values, name=segment_dim), + ] + + binary_mask = lambda_mask.any(dim=dim) if lambda_mask is not None else None + + return _add_dpwl_sos2( + model, + name, + breakpoints, + dim, + segment_dim, + target_expr, + lambda_coords, + lambda_mask, + binary_coords, + binary_mask, + ) diff --git a/linopy/solver_capabilities.py b/linopy/solver_capabilities.py index 0ffea923..f0507317 100644 --- a/linopy/solver_capabilities.py +++ b/linopy/solver_capabilities.py @@ -161,6 +161,19 @@ def supports(self, feature: SolverFeature) -> bool: } ), ), + "knitro": SolverInfo( + name="knitro", + display_name="Artelys Knitro", + features=frozenset( + { + SolverFeature.INTEGER_VARIABLES, + SolverFeature.QUADRATIC_OBJECTIVE, + SolverFeature.LP_FILE_NAMES, + SolverFeature.READ_MODEL_FROM_FILE, + SolverFeature.SOLUTION_FILE_NOT_NEEDED, + } + ), + ), "scip": SolverInfo( 
name="scip", display_name="SCIP", diff --git a/linopy/solvers.py b/linopy/solvers.py index fe516b47..16c07932 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -176,6 +176,14 @@ class xpress_Namespaces: # type: ignore[no-redef] SET = 3 +with contextlib.suppress(ModuleNotFoundError, ImportError): + import knitro + + with contextlib.suppress(Exception): + kc = knitro.KN_new() + knitro.KN_free(kc) + available_solvers.append("knitro") + with contextlib.suppress(ModuleNotFoundError): import mosek @@ -239,6 +247,7 @@ class SolverName(enum.Enum): Gurobi = "gurobi" SCIP = "scip" Xpress = "xpress" + Knitro = "knitro" Mosek = "mosek" COPT = "copt" MindOpt = "mindopt" @@ -773,10 +782,10 @@ def get_solver_solution() -> Solution: class Highs(Solver[None]): """ - Solver subclass for the Highs solver. Highs must be installed - for usage. Find the documentation at https://www.maths.ed.ac.uk/hall/HiGHS/. + Solver subclass for the HiGHS solver. HiGHS must be installed + for usage. Find the documentation at https://highs.dev/. - The full list of solver options is documented at https://www.maths.ed.ac.uk/hall/HiGHS/HighsOptions.set. + The full list of solver options is documented at https://ergo-code.github.io/HiGHS/stable/options/definitions/. Some exemplary options are: @@ -808,8 +817,8 @@ def solve_problem_from_model( explicit_coordinate_names: bool = False, ) -> Result: """ - Solve a linear problem directly from a linopy model using the Highs solver. - Reads a linear problem file and passes it to the highs solver. + Solve a linear problem directly from a linopy model using the HiGHS solver. + Reads a linear problem file and passes it to the HiGHS solver. If the solution is feasible the function returns the objective, solution and dual constraint variables. 
@@ -834,7 +843,7 @@ def solve_problem_from_model( ------- Result """ - # check for Highs solver compatibility + # check for HiGHS solver compatibility if self.solver_options.get("solver") in [ "simplex", "ipm", @@ -871,8 +880,8 @@ def solve_problem_from_file( env: None = None, ) -> Result: """ - Solve a linear problem from a problem file using the Highs solver. - Reads a linear problem file and passes it to the highs solver. + Solve a linear problem from a problem file using the HiGHS solver. + Reads a linear problem file and passes it to the HiGHS solver. If the solution is feasible the function returns the objective, solution and dual constraint variables. @@ -934,13 +943,13 @@ def _solve( sense: str | None = None, ) -> Result: """ - Solve a linear problem from a Highs object. + Solve a linear problem from a HiGHS object. Parameters ---------- h : highspy.Highs - Highs object. + HiGHS object. solution_fn : Path, optional Path to the solution file. log_fn : Path, optional @@ -1252,7 +1261,7 @@ def get_solver_solution() -> Solution: return Solution(sol, dual, objective) solution = self.safe_get_solution(status=status, func=get_solver_solution) - solution = solution = maybe_adjust_objective_sign(solution, io_api, sense) + solution = maybe_adjust_objective_sign(solution, io_api, sense) return Result(status, solution, m) @@ -1736,6 +1745,200 @@ def get_solver_solution() -> Solution: return Result(status, solution, m) +KnitroResult = namedtuple("KnitroResult", "reported_runtime") + + +class Knitro(Solver[None]): + """ + Solver subclass for the Knitro solver. 
+ + For more information on solver options, see + https://www.artelys.com/app/docs/knitro/3_referenceManual/knitroPythonReference.html + + Attributes + ---------- + **solver_options + options for the given solver + """ + + def __init__( + self, + **solver_options: Any, + ) -> None: + super().__init__(**solver_options) + + def solve_problem_from_model( + self, + model: Model, + solution_fn: Path | None = None, + log_fn: Path | None = None, + warmstart_fn: Path | None = None, + basis_fn: Path | None = None, + env: None = None, + explicit_coordinate_names: bool = False, + ) -> Result: + msg = "Direct API not implemented for Knitro" + raise NotImplementedError(msg) + + @staticmethod + def _set_option(kc: Any, name: str, value: Any) -> None: + param_id = knitro.KN_get_param_id(kc, name) + + if isinstance(value, bool): + value = int(value) + + if isinstance(value, int): + knitro.KN_set_int_param(kc, param_id, value) + elif isinstance(value, float): + knitro.KN_set_double_param(kc, param_id, value) + elif isinstance(value, str): + knitro.KN_set_char_param(kc, param_id, value) + else: + msg = f"Unsupported Knitro option type for {name!r}: {type(value).__name__}" + raise TypeError(msg) + + @staticmethod + def _extract_values( + kc: Any, + get_count_fn: Callable[..., Any], + get_values_fn: Callable[..., Any], + get_names_fn: Callable[..., Any], + ) -> pd.Series: + n = int(get_count_fn(kc)) + if n == 0: + return pd.Series(dtype=float) + + values = get_values_fn(kc, n - 1) + names = list(get_names_fn(kc)) + return pd.Series(values, index=names, dtype=float) + + def solve_problem_from_file( + self, + problem_fn: Path, + solution_fn: Path | None = None, + log_fn: Path | None = None, + warmstart_fn: Path | None = None, + basis_fn: Path | None = None, + env: None = None, + ) -> Result: + """ + Solve a linear problem from a problem file using the Knitro solver. + + Parameters + ---------- + problem_fn : Path + Path to the problem file. 
+ solution_fn : Path, optional + Path to the solution file. + log_fn : Path, optional + Path to the log file. + warmstart_fn : Path, optional + Path to the warmstart file. + basis_fn : Path, optional + Path to the basis file. + env : None, optional + Environment for the solver. + + Returns + ------- + Result + """ + CONDITION_MAP: dict[int, TerminationCondition] = { + 0: TerminationCondition.optimal, + -100: TerminationCondition.suboptimal, + -101: TerminationCondition.infeasible, + -102: TerminationCondition.suboptimal, + -200: TerminationCondition.unbounded, + -201: TerminationCondition.infeasible_or_unbounded, + -202: TerminationCondition.iteration_limit, + -203: TerminationCondition.time_limit, + -204: TerminationCondition.terminated_by_limit, + -300: TerminationCondition.unbounded, + -400: TerminationCondition.iteration_limit, + -401: TerminationCondition.time_limit, + -410: TerminationCondition.terminated_by_limit, + -411: TerminationCondition.terminated_by_limit, + } + + READ_OPTIONS: dict[str, str] = {".lp": "l", ".mps": "m"} + + io_api = read_io_api_from_problem_file(problem_fn) + sense = read_sense_from_problem_file(problem_fn) + + suffix = problem_fn.suffix.lower() + if suffix not in READ_OPTIONS: + msg = f"Unsupported problem file format: {suffix}" + raise ValueError(msg) + + kc = knitro.KN_new() + try: + knitro.KN_read_problem( + kc, + path_to_string(problem_fn), + read_options=READ_OPTIONS[suffix], + ) + + if log_fn is not None: + logger.warning("Log file output not implemented for Knitro") + + for k, v in self.solver_options.items(): + self._set_option(kc, k, v) + + ret = int(knitro.KN_solve(kc)) + + reported_runtime: float | None = None + with contextlib.suppress(Exception): + reported_runtime = float(knitro.KN_get_solve_time_real(kc)) + + if ret in CONDITION_MAP: + termination_condition = CONDITION_MAP[ret] + elif ret > 0: + termination_condition = TerminationCondition.internal_solver_error + else: + termination_condition = 
TerminationCondition.unknown + + status = Status.from_termination_condition(termination_condition) + status.legacy_status = str(ret) + + def get_solver_solution() -> Solution: + objective = float(knitro.KN_get_obj_value(kc)) + + sol = self._extract_values( + kc, + knitro.KN_get_number_vars, + knitro.KN_get_var_primal_values, + knitro.KN_get_var_names, + ) + + try: + dual = self._extract_values( + kc, + knitro.KN_get_number_cons, + knitro.KN_get_con_dual_values, + knitro.KN_get_con_names, + ) + except Exception: + logger.warning("Dual values couldn't be parsed") + dual = pd.Series(dtype=float) + + return Solution(sol, dual, objective) + + solution = self.safe_get_solution(status=status, func=get_solver_solution) + solution = maybe_adjust_objective_sign(solution, io_api, sense) + + if solution_fn is not None: + solution_fn.parent.mkdir(exist_ok=True) + knitro.KN_write_mps_file(kc, path_to_string(solution_fn)) + + return Result( + status, solution, KnitroResult(reported_runtime=reported_runtime) + ) + + finally: + with contextlib.suppress(Exception): + knitro.KN_free(kc) + + mosek_bas_re = re.compile(r" (XL|XU)\s+([^ \t]+)\s+([^ \t]+)| (LL|UL|BS)\s+([^ \t]+)") diff --git a/linopy/sos_reformulation.py b/linopy/sos_reformulation.py new file mode 100644 index 00000000..8ccb7613 --- /dev/null +++ b/linopy/sos_reformulation.py @@ -0,0 +1,328 @@ +""" +SOS constraint reformulation using Big-M method. + +Converts SOS1/SOS2 constraints to binary + linear constraints for solvers +that don't support them natively. 
+""" + +from __future__ import annotations + +import logging +from dataclasses import dataclass, field +from typing import TYPE_CHECKING + +import numpy as np +import pandas as pd + +from linopy.constants import SOS_BIG_M_ATTR, SOS_DIM_ATTR, SOS_TYPE_ATTR + +if TYPE_CHECKING: + from xarray import DataArray + + from linopy.model import Model + from linopy.variables import Variable + +logger = logging.getLogger(__name__) + + +@dataclass +class SOSReformulationResult: + """Tracks what was added/changed during SOS reformulation for undo.""" + + reformulated: list[str] = field(default_factory=list) + added_variables: list[str] = field(default_factory=list) + added_constraints: list[str] = field(default_factory=list) + saved_attrs: dict[str, dict] = field(default_factory=dict) + + +def compute_big_m_values(var: Variable) -> DataArray: + """ + Compute Big-M values from variable bounds and custom big_m attribute. + + Uses the tighter of variable upper bound and custom big_m to ensure + the best possible LP relaxation. + + Parameters + ---------- + var : Variable + Variable with bounds (and optionally big_m_upper attr). + + Returns + ------- + DataArray + M_upper for reformulation constraints: x <= M_upper * y + + Raises + ------ + ValueError + If variable has negative lower bounds or infinite upper bounds. + """ + # SOS reformulation requires non-negative variables + if (var.lower < 0).any(): + raise ValueError( + f"Variable '{var.name}' has negative lower bounds. " + "SOS reformulation requires non-negative variables (lower >= 0)." + ) + + big_m_upper = var.attrs.get(SOS_BIG_M_ATTR) + M_upper = var.upper + + if big_m_upper is not None: + M_upper = M_upper.clip(max=big_m_upper) # type: ignore[arg-type] + + # Validate finiteness + if np.isinf(M_upper).any(): + raise ValueError( + f"Variable '{var.name}' has infinite upper bounds. " + "Set finite bounds or specify big_m in add_sos_constraints()." 
+ ) + + return M_upper + + +def reformulate_sos1( + model: Model, var: Variable, prefix: str, M: DataArray | None = None +) -> tuple[list[str], list[str]]: + """ + Reformulate SOS1 constraint as binary + linear constraints. + + For each x[i] with upper bound M[i]: + - Add binary indicator y[i] + - x[i] <= M[i] * y[i] + - sum(y) <= 1 + + Parameters + ---------- + model : Model + Model to add reformulation constraints to. + var : Variable + Variable with SOS1 constraint (must have non-negative lower bounds). + prefix : str + Prefix for naming auxiliary variables and constraints. + M : DataArray, optional + Precomputed Big-M values. Computed from variable bounds if not provided. + + Returns + ------- + tuple[list[str], list[str]] + Names of added variables and constraints. + """ + if M is None: + M = compute_big_m_values(var) + sos_dim = str(var.attrs[SOS_DIM_ATTR]) + name = var.name + + y_name = f"{prefix}{name}_y" + upper_name = f"{prefix}{name}_upper" + card_name = f"{prefix}{name}_card" + + coords = [var.coords[d] for d in var.dims] + y = model.add_variables(coords=coords, name=y_name, binary=True) + + model.add_constraints(var <= M * y, name=upper_name) + model.add_constraints(y.sum(dim=sos_dim) <= 1, name=card_name) + + return [y_name], [upper_name, card_name] + + +def reformulate_sos2( + model: Model, var: Variable, prefix: str, M: DataArray | None = None +) -> tuple[list[str], list[str]]: + """ + Reformulate SOS2 constraint as binary + linear constraints. + + For ordered x[0..n-1] with upper bounds M[i]: + - Add n-1 binary segment indicators z[i] + - x[0] <= M[0] * z[0] + - x[i] <= M[i] * (z[i-1] + z[i]) for middle elements + - x[n-1] <= M[n-1] * z[n-2] + - sum(z) <= 1 + + Parameters + ---------- + model : Model + Model to add reformulation constraints to. + var : Variable + Variable with SOS2 constraint (must have non-negative lower bounds). + prefix : str + Prefix for naming auxiliary variables and constraints. 
+ M : DataArray, optional + Precomputed Big-M values. Computed from variable bounds if not provided. + + Returns + ------- + tuple[list[str], list[str]] + Names of added variables and constraints. + """ + sos_dim = str(var.attrs[SOS_DIM_ATTR]) + name = var.name + n = var.sizes[sos_dim] + + if n <= 1: + return [], [] + + if M is None: + M = compute_big_m_values(var) + + z_name = f"{prefix}{name}_z" + first_name = f"{prefix}{name}_upper_first" + last_name = f"{prefix}{name}_upper_last" + card_name = f"{prefix}{name}_card" + + z_coords = [ + pd.Index(var.coords[sos_dim].values[:-1], name=sos_dim) + if d == sos_dim + else var.coords[d] + for d in var.dims + ] + z = model.add_variables(coords=z_coords, name=z_name, binary=True) + + x_expr, z_expr = 1 * var, 1 * z + + added_constraints = [first_name] + + model.add_constraints( + x_expr.isel({sos_dim: 0}) <= M.isel({sos_dim: 0}) * z_expr.isel({sos_dim: 0}), + name=first_name, + ) + + if n > 2: + mid_slice = slice(1, n - 1) + x_mid = x_expr.isel({sos_dim: mid_slice}) + M_mid = M.isel({sos_dim: mid_slice}) + + z_left_coords = var.coords[sos_dim].values[: n - 2] + z_right_coords = var.coords[sos_dim].values[1 : n - 1] + + z_left = z_expr.sel({sos_dim: z_left_coords}) + z_right = z_expr.sel({sos_dim: z_right_coords}) + + z_left_aligned = z_left.assign_coords({sos_dim: M_mid.coords[sos_dim].values}) + z_right_aligned = z_right.assign_coords({sos_dim: M_mid.coords[sos_dim].values}) + + mid_name = f"{prefix}{name}_upper_mid" + model.add_constraints( + x_mid <= M_mid * (z_left_aligned + z_right_aligned), + name=mid_name, + ) + added_constraints.append(mid_name) + + model.add_constraints( + x_expr.isel({sos_dim: n - 1}) + <= M.isel({sos_dim: n - 1}) * z_expr.isel({sos_dim: n - 2}), + name=last_name, + ) + added_constraints.extend([last_name, card_name]) + + model.add_constraints(z.sum(dim=sos_dim) <= 1, name=card_name) + + return [z_name], added_constraints + + +def reformulate_sos_constraints( + model: Model, prefix: str = 
"_sos_reform_" +) -> SOSReformulationResult: + """ + Reformulate SOS constraints as binary + linear constraints. + + This converts SOS1 and SOS2 constraints into equivalent binary variable + formulations using the Big-M method. This allows solving models with SOS + constraints using solvers that don't support them natively (e.g., HiGHS, GLPK). + + Big-M values are determined as follows: + 1. If custom big_m was specified in add_sos_constraints(), use that + 2. Otherwise, use the variable bounds (tightest valid Big-M) + + Note: This permanently mutates the model. To solve with automatic + undo, use ``model.solve(reformulate_sos=True)`` instead. + + Parameters + ---------- + model : Model + Model containing SOS constraints to reformulate. + prefix : str, optional + Prefix for auxiliary variables and constraints. Default: "_sos_reform_" + + Returns + ------- + SOSReformulationResult + Tracks what was changed, enabling undo via ``undo_sos_reformulation``. + """ + result = SOSReformulationResult() + + try: + for var_name in list(model.variables.sos): + var = model.variables[var_name] + sos_type = var.attrs[SOS_TYPE_ATTR] + sos_dim = var.attrs[SOS_DIM_ATTR] + + if var.sizes[sos_dim] <= 1: + result.saved_attrs[var_name] = dict(var.attrs) + model.remove_sos_constraints(var) + result.reformulated.append(var_name) + continue + + M = compute_big_m_values(var) + if (M == 0).all(): + result.saved_attrs[var_name] = dict(var.attrs) + model.remove_sos_constraints(var) + result.reformulated.append(var_name) + continue + + result.saved_attrs[var_name] = dict(var.attrs) + + sort_idx = np.argsort(var.coords[sos_dim].values) + if not np.all(sort_idx[:-1] <= sort_idx[1:]): + sorted_var = var.isel({sos_dim: sort_idx}) + M = M.isel({sos_dim: sort_idx}) + else: + sorted_var = var + + if sos_type == 1: + added_vars, added_cons = reformulate_sos1(model, sorted_var, prefix, M) + elif sos_type == 2: + added_vars, added_cons = reformulate_sos2(model, sorted_var, prefix, M) + else: + raise 
ValueError( + f"Unknown sos_type={sos_type} on variable '{var_name}'" + ) + + result.added_variables.extend(added_vars) + result.added_constraints.extend(added_cons) + + model.remove_sos_constraints(var) + result.reformulated.append(var_name) + except Exception: + undo_sos_reformulation(model, result) + raise + + logger.info(f"Reformulated {len(result.reformulated)} SOS constraint(s)") + return result + + +def undo_sos_reformulation(model: Model, result: SOSReformulationResult) -> None: + """ + Undo a previous SOS reformulation, restoring the model to its original state. + + Parameters + ---------- + model : Model + Model that was reformulated. + result : SOSReformulationResult + Result from ``reformulate_sos_constraints`` tracking what was added. + """ + objective_value = model.objective._value + + for con_name in result.added_constraints: + if con_name in model.constraints: + model.remove_constraints(con_name) + + for var_name in result.added_variables: + if var_name in model.variables: + model.remove_variables(var_name) + + for var_name, attrs in result.saved_attrs.items(): + if var_name in model.variables: + model.variables[var_name].attrs.update(attrs) + + model.objective._value = objective_value diff --git a/linopy/types.py b/linopy/types.py index 68e5c307..0e3662bf 100644 --- a/linopy/types.py +++ b/linopy/types.py @@ -47,6 +47,7 @@ "QuadraticExpression", ] ConstraintLike = Union["Constraint", "AnonymousScalarConstraint"] +LinExprLike = Union["Variable", "LinearExpression"] MaskLike = Union[numpy.ndarray, DataArray, Series, DataFrame] # noqa: UP007 SideLike = Union[ConstantLike, VariableLike, ExpressionLike] # noqa: UP007 PathLike = Union[str, Path] # noqa: UP007 diff --git a/linopy/variables.py b/linopy/variables.py index e2570b5d..beaeb4e6 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -14,6 +14,7 @@ from typing import ( TYPE_CHECKING, Any, + cast, overload, ) from warnings import warn @@ -52,7 +53,7 @@ to_polars, ) from linopy.config import options 
-from linopy.constants import HELPER_DIMS, TERM_DIM +from linopy.constants import HELPER_DIMS, SOS_DIM_ATTR, SOS_TYPE_ATTR, TERM_DIM from linopy.solver_capabilities import SolverFeature, solver_supports from linopy.types import ( ConstantLike, @@ -195,10 +196,10 @@ def __init__( if "label_range" not in data.attrs: data.assign_attrs(label_range=(data.labels.min(), data.labels.max())) - if "sos_type" in data.attrs or "sos_dim" in data.attrs: - if (sos_type := data.attrs.get("sos_type")) not in (1, 2): + if SOS_TYPE_ATTR in data.attrs or SOS_DIM_ATTR in data.attrs: + if (sos_type := data.attrs.get(SOS_TYPE_ATTR)) not in (1, 2): raise ValueError(f"sos_type must be 1 or 2, got {sos_type}") - if (sos_dim := data.attrs.get("sos_dim")) not in data.dims: + if (sos_dim := data.attrs.get(SOS_DIM_ATTR)) not in data.dims: raise ValueError( f"sos_dim must name a variable dimension, got {sos_dim}" ) @@ -328,8 +329,8 @@ def __repr__(self) -> str: dim_names = self.coord_names dim_sizes = list(self.sizes.values()) masked_entries = (~self.mask).sum().values - sos_type = self.attrs.get("sos_type") - sos_dim = self.attrs.get("sos_dim") + sos_type = self.attrs.get(SOS_TYPE_ATTR) + sos_dim = self.attrs.get(SOS_DIM_ATTR) lines = [] if dims: @@ -420,7 +421,9 @@ def __pow__(self, other: int) -> QuadraticExpression: return NotImplemented if other == 2: expr = self.to_linexpr() - return expr._multiply_by_linear_expression(expr) + return cast( + "QuadraticExpression", expr._multiply_by_linear_expression(expr) + ) raise ValueError("Can only raise to the power of 2") @overload @@ -1244,8 +1247,8 @@ def __repr__(self) -> str: if ds.coords else "" ) - if (sos_type := ds.attrs.get("sos_type")) in (1, 2) and ( - sos_dim := ds.attrs.get("sos_dim") + if (sos_type := ds.attrs.get(SOS_TYPE_ATTR)) in (1, 2) and ( + sos_dim := ds.attrs.get(SOS_DIM_ATTR) ): coords += f" - sos{sos_type} on {sos_dim}" r += f" * {name}{coords}\n" @@ -1401,8 +1404,8 @@ def sos(self) -> Variables: { name: self.data[name] for 
name in self - if self[name].attrs.get("sos_dim") - and self[name].attrs.get("sos_type") in (1, 2) + if self[name].attrs.get(SOS_DIM_ATTR) + and self[name].attrs.get(SOS_TYPE_ATTR) in (1, 2) }, self.model, ) diff --git a/pyproject.toml b/pyproject.toml index 52d5e3d5..0f5bd326 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ dependencies = [ "numexpr", "xarray>=2024.2.0", "dask>=0.18.0", - "polars", + "polars>=1.31.1", "tqdm", "deprecation", "packaging", @@ -84,6 +84,7 @@ solvers = [ "coptpy!=7.2.1", "xpress; platform_system != 'Darwin' and python_version < '3.11'", "pyscipopt; platform_system != 'Darwin'", + "knitro>=15.1.0", # "cupdlpx>=0.1.2", pip package currently unstable ] diff --git a/test/test_common.py b/test/test_common.py index db218375..c3500155 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -23,6 +23,7 @@ get_dims_with_index_levels, is_constant, iterate_slices, + maybe_group_terms_polars, ) from linopy.testing import assert_linequal, assert_varequal @@ -737,3 +738,20 @@ def test_is_constant() -> None: ] for cv in constant_values: assert is_constant(cv) + + +def test_maybe_group_terms_polars_no_duplicates() -> None: + """Fast path: distinct (labels, vars) pairs skip group_by.""" + df = pl.DataFrame({"labels": [0, 0], "vars": [1, 2], "coeffs": [3.0, 4.0]}) + result = maybe_group_terms_polars(df) + assert result.shape == (2, 3) + assert result.columns == ["labels", "vars", "coeffs"] + assert result["coeffs"].to_list() == [3.0, 4.0] + + +def test_maybe_group_terms_polars_with_duplicates() -> None: + """Slow path: duplicate (labels, vars) pairs trigger group_by.""" + df = pl.DataFrame({"labels": [0, 0], "vars": [1, 1], "coeffs": [3.0, 4.0]}) + result = maybe_group_terms_polars(df) + assert result.shape == (1, 3) + assert result["coeffs"].to_list() == [7.0] diff --git a/test/test_constraint.py b/test/test_constraint.py index 35f49ea2..bfd29a6e 100644 --- a/test/test_constraint.py +++ b/test/test_constraint.py @@ -437,6 
+437,20 @@ def test_constraint_to_polars(c: linopy.constraints.Constraint) -> None: assert isinstance(c.to_polars(), pl.DataFrame) +def test_constraint_to_polars_mixed_signs(m: Model, x: linopy.Variable) -> None: + """Test to_polars when a constraint has mixed sign values across dims.""" + # Create a constraint, then manually patch the sign to have mixed values + m.add_constraints(x >= 0, name="mixed") + con = m.constraints["mixed"] + # Replace sign data with mixed signs across the first dimension + n = con.data.sizes["first"] + signs = np.array(["<=" if i % 2 == 0 else ">=" for i in range(n)]) + con.data["sign"] = xr.DataArray(signs, dims=con.data["sign"].dims) + df = con.to_polars() + assert isinstance(df, pl.DataFrame) + assert set(df["sign"].to_list()) == {"<=", ">="} + + def test_constraint_assignment_with_anonymous_constraints( m: Model, x: linopy.Variable, y: linopy.Variable ) -> None: diff --git a/test/test_constraints.py b/test/test_constraints.py index cca010e8..01aebb69 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -162,6 +162,41 @@ def test_masked_constraints() -> None: assert (m.constraints.labels.con0[5:10, :] == -1).all() +def test_masked_constraints_broadcast() -> None: + m: Model = Model() + + lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) + upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + x = m.add_variables(lower, upper) + y = m.add_variables() + + mask = pd.Series([True] * 5 + [False] * 5) + m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc1", mask=mask) + assert (m.constraints.labels.bc1[0:5, :] != -1).all() + assert (m.constraints.labels.bc1[5:10, :] == -1).all() + + mask2 = xr.DataArray([True] * 5 + [False] * 5, dims=["dim_1"]) + m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc2", mask=mask2) + assert (m.constraints.labels.bc2[:, 0:5] != -1).all() + assert (m.constraints.labels.bc2[:, 5:10] == -1).all() + + mask3 = xr.DataArray( + [True, True, False, False, 
False], + dims=["dim_0"], + coords={"dim_0": range(5)}, + ) + with pytest.warns(FutureWarning, match="Missing values will be filled"): + m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc3", mask=mask3) + assert (m.constraints.labels.bc3[0:2, :] != -1).all() + assert (m.constraints.labels.bc3[2:5, :] == -1).all() + assert (m.constraints.labels.bc3[5:10, :] == -1).all() + + # Mask with extra dimension not in data should raise + mask4 = xr.DataArray([True, False], dims=["extra_dim"]) + with pytest.raises(AssertionError, match="not a subset"): + m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc4", mask=mask4) + + def test_non_aligned_constraints() -> None: m: Model = Model() diff --git a/test/test_io.py b/test/test_io.py index 4336f29d..e8ded144 100644 --- a/test/test_io.py +++ b/test/test_io.py @@ -336,3 +336,40 @@ def test_to_file_lp_with_negative_zero_coefficients(tmp_path: Path) -> None: # Verify Gurobi can read it without errors gurobipy.read(str(fn)) + + +def test_to_file_lp_same_sign_constraints(tmp_path: Path) -> None: + """Test LP writing when all constraints have the same sign operator.""" + m = Model() + N = np.arange(5) + x = m.add_variables(coords=[N], name="x") + # All constraints use <= + m.add_constraints(x <= 10, name="upper") + m.add_constraints(x <= 20, name="upper2") + m.add_objective(x.sum()) + + fn = tmp_path / "same_sign.lp" + m.to_file(fn) + content = fn.read_text() + assert "s.t." in content + assert "<=" in content + + +def test_to_file_lp_mixed_sign_constraints(tmp_path: Path) -> None: + """Test LP writing when constraints have different sign operators.""" + m = Model() + N = np.arange(5) + x = m.add_variables(coords=[N], name="x") + # Mix of <= and >= constraints in the same container + m.add_constraints(x <= 10, name="upper") + m.add_constraints(x >= 1, name="lower") + m.add_constraints(2 * x == 8, name="eq") + m.add_objective(x.sum()) + + fn = tmp_path / "mixed_sign.lp" + m.to_file(fn) + content = fn.read_text() + assert "s.t." 
in content + assert "<=" in content + assert ">=" in content + assert "=" in content diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index a75ace3f..0da9ec7f 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -1313,3 +1313,89 @@ def test_simplify_partial_cancellation(x: Variable, y: Variable) -> None: assert all(simplified.coeffs.values == 3.0), ( f"Expected coefficient 3.0, got {simplified.coeffs.values}" ) + + +def test_constant_only_expression_mul_dataarray(m: Model) -> None: + const_arr = xr.DataArray([2, 3], dims=["dim_0"]) + const_expr = LinearExpression(const_arr, m) + assert const_expr.is_constant + assert const_expr.nterm == 0 + + data_arr = xr.DataArray([10, 20], dims=["dim_0"]) + expected_const = const_arr * data_arr + + result = const_expr * data_arr + assert isinstance(result, LinearExpression) + assert result.is_constant + assert (result.const == expected_const).all() + + result_rev = data_arr * const_expr + assert isinstance(result_rev, LinearExpression) + assert result_rev.is_constant + assert (result_rev.const == expected_const).all() + + +def test_constant_only_expression_mul_linexpr_with_vars(m: Model, x: Variable) -> None: + const_arr = xr.DataArray([2, 3], dims=["dim_0"]) + const_expr = LinearExpression(const_arr, m) + assert const_expr.is_constant + assert const_expr.nterm == 0 + + expr_with_vars = 1 * x + 5 + expected_coeffs = const_arr + expected_const = const_arr * 5 + + result = const_expr * expr_with_vars + assert isinstance(result, LinearExpression) + assert (result.coeffs == expected_coeffs).all() + assert (result.const == expected_const).all() + + result_rev = expr_with_vars * const_expr + assert isinstance(result_rev, LinearExpression) + assert (result_rev.coeffs == expected_coeffs).all() + assert (result_rev.const == expected_const).all() + + +def test_constant_only_expression_mul_constant_only(m: Model) -> None: + const_arr = xr.DataArray([2, 3], dims=["dim_0"]) + 
const_arr2 = xr.DataArray([4, 5], dims=["dim_0"]) + const_expr = LinearExpression(const_arr, m) + const_expr2 = LinearExpression(const_arr2, m) + assert const_expr.is_constant + assert const_expr2.is_constant + + expected_const = const_arr * const_arr2 + + result = const_expr * const_expr2 + assert isinstance(result, LinearExpression) + assert result.is_constant + assert (result.const == expected_const).all() + + result_rev = const_expr2 * const_expr + assert isinstance(result_rev, LinearExpression) + assert result_rev.is_constant + assert (result_rev.const == expected_const).all() + + +def test_constant_only_expression_mul_linexpr_with_vars_and_const( + m: Model, x: Variable +) -> None: + const_arr = xr.DataArray([2, 3], dims=["dim_0"]) + const_expr = LinearExpression(const_arr, m) + assert const_expr.is_constant + + expr_with_vars_and_const = 4 * x + 10 + expected_coeffs = const_arr * 4 + expected_const = const_arr * 10 + + result = const_expr * expr_with_vars_and_const + assert isinstance(result, LinearExpression) + assert not result.is_constant + assert (result.coeffs == expected_coeffs).all() + assert (result.const == expected_const).all() + + result_rev = expr_with_vars_and_const * const_expr + assert isinstance(result_rev, LinearExpression) + assert not result_rev.is_constant + assert (result_rev.coeffs == expected_coeffs).all() + assert (result_rev.const == expected_const).all() diff --git a/test/test_optimization.py b/test/test_optimization.py index ff790d6e..492d703a 100644 --- a/test/test_optimization.py +++ b/test/test_optimization.py @@ -1091,6 +1091,70 @@ def test_solver_classes_direct( solver_.solve_problem(model=model) +@pytest.fixture +def auto_mask_variable_model() -> Model: + """Model with auto_mask=True and NaN in variable bounds.""" + m = Model(auto_mask=True) + + x = m.add_variables(lower=0, coords=[range(10)], name="x") + lower = pd.Series([0.0] * 8 + [np.nan, np.nan], range(10)) + y = m.add_variables(lower=lower, name="y") # NaN bounds 
auto-masked + + m.add_constraints(x + y, GREATER_EQUAL, 10) + m.add_constraints(y, GREATER_EQUAL, 0) + m.add_objective(2 * x + y) + return m + + +@pytest.fixture +def auto_mask_constraint_model() -> Model: + """Model with auto_mask=True and NaN in constraint RHS.""" + m = Model(auto_mask=True) + + x = m.add_variables(lower=0, coords=[range(10)], name="x") + y = m.add_variables(lower=0, coords=[range(10)], name="y") + + rhs = pd.Series([10.0] * 8 + [np.nan, np.nan], range(10)) + m.add_constraints(x + y, GREATER_EQUAL, rhs) # NaN rhs auto-masked + m.add_constraints(x + y, GREATER_EQUAL, 5) + + m.add_objective(2 * x + y) + return m + + +@pytest.mark.parametrize("solver,io_api,explicit_coordinate_names", params) +def test_auto_mask_variable_model( + auto_mask_variable_model: Model, + solver: str, + io_api: str, + explicit_coordinate_names: bool, +) -> None: + """Test that auto_mask=True correctly masks variables with NaN bounds.""" + auto_mask_variable_model.solve( + solver, io_api=io_api, explicit_coordinate_names=explicit_coordinate_names + ) + y = auto_mask_variable_model.variables.y + # Same assertions as test_masked_variable_model + assert y.solution[-2:].isnull().all() + assert y.solution[:-2].notnull().all() + + +@pytest.mark.parametrize("solver,io_api,explicit_coordinate_names", params) +def test_auto_mask_constraint_model( + auto_mask_constraint_model: Model, + solver: str, + io_api: str, + explicit_coordinate_names: bool, +) -> None: + """Test that auto_mask=True correctly masks constraints with NaN RHS.""" + auto_mask_constraint_model.solve( + solver, io_api=io_api, explicit_coordinate_names=explicit_coordinate_names + ) + # Same assertions as test_masked_constraint_model + assert (auto_mask_constraint_model.solution.y[:-2] == 10).all() + assert (auto_mask_constraint_model.solution.y[-2:] == 5).all() + + # def init_model_large(): # m = Model() # time = pd.Index(range(10), name="time") diff --git a/test/test_piecewise_constraints.py 
b/test/test_piecewise_constraints.py new file mode 100644 index 00000000..aeb76ec7 --- /dev/null +++ b/test/test_piecewise_constraints.py @@ -0,0 +1,2127 @@ +"""Tests for piecewise linear constraints.""" + +from __future__ import annotations + +from pathlib import Path + +import numpy as np +import pandas as pd +import pytest +import xarray as xr + +from linopy import Model, available_solvers, breakpoints +from linopy.constants import ( + PWL_BINARY_SUFFIX, + PWL_CONVEX_SUFFIX, + PWL_DELTA_SUFFIX, + PWL_FILL_SUFFIX, + PWL_LAMBDA_SUFFIX, + PWL_LINK_SUFFIX, + PWL_SELECT_SUFFIX, +) +from linopy.solver_capabilities import SolverFeature, get_available_solvers_with_feature + + +class TestBasicSingleVariable: + """Tests for single variable piecewise constraints.""" + + def test_basic_single_variable(self) -> None: + """Test basic piecewise constraint with a single variable.""" + m = Model() + x = m.add_variables(name="x") + + breakpoints = xr.DataArray( + [0, 10, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2, 3]} + ) + + m.add_piecewise_constraints(x, breakpoints, dim="bp") + + # Check lambda variables were created + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + + # Check constraints were created + assert f"pwl0{PWL_CONVEX_SUFFIX}" in m.constraints + assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints + + # Check SOS2 constraint was added + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert lambda_var.attrs.get("sos_type") == 2 + assert lambda_var.attrs.get("sos_dim") == "bp" + + def test_single_variable_with_coords(self) -> None: + """Test piecewise constraint with a variable that has coordinates.""" + m = Model() + generators = pd.Index(["gen1", "gen2"], name="generator") + x = m.add_variables(coords=[generators], name="x") + + bp_coords = [0, 1, 2] + breakpoints = xr.DataArray( + [[0, 50, 100], [0, 30, 80]], + dims=["generator", "bp"], + coords={"generator": generators, "bp": bp_coords}, + ) + + m.add_piecewise_constraints(x, breakpoints, dim="bp") + + # 
Lambda should have both generator and bp dimensions + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert "generator" in lambda_var.dims + assert "bp" in lambda_var.dims + + +class TestDictOfVariables: + """Tests for dict of variables (multiple linked variables).""" + + def test_dict_of_variables(self) -> None: + """Test piecewise constraint with multiple linked variables.""" + m = Model() + power = m.add_variables(name="power") + efficiency = m.add_variables(name="efficiency") + + breakpoints = xr.DataArray( + [[0, 50, 100], [0.8, 0.95, 0.9]], + dims=["var", "bp"], + coords={"var": ["power", "efficiency"], "bp": [0, 1, 2]}, + ) + + m.add_piecewise_constraints( + {"power": power, "efficiency": efficiency}, + breakpoints, + dim="bp", + ) + + # Check single linking constraint was created for all variables + assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints + + def test_dict_with_coordinates(self) -> None: + """Test dict of variables with additional coordinates.""" + m = Model() + generators = pd.Index(["gen1", "gen2"], name="generator") + power = m.add_variables(coords=[generators], name="power") + efficiency = m.add_variables(coords=[generators], name="efficiency") + + breakpoints = xr.DataArray( + [[[0, 50, 100], [0.8, 0.95, 0.9]], [[0, 30, 80], [0.75, 0.9, 0.85]]], + dims=["generator", "var", "bp"], + coords={ + "generator": generators, + "var": ["power", "efficiency"], + "bp": [0, 1, 2], + }, + ) + + m.add_piecewise_constraints( + {"power": power, "efficiency": efficiency}, + breakpoints, + dim="bp", + ) + + # Lambda should have generator and bp dimensions (not var) + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert "generator" in lambda_var.dims + assert "bp" in lambda_var.dims + assert "var" not in lambda_var.dims + + +class TestAutoDetectLinkDim: + """Tests for auto-detection of linking dimension.""" + + def test_auto_detect_linking_dim(self) -> None: + """Test that linking dimension is auto-detected from breakpoints.""" + m = Model() + 
power = m.add_variables(name="power") + efficiency = m.add_variables(name="efficiency") + + breakpoints = xr.DataArray( + [[0, 50, 100], [0.8, 0.95, 0.9]], + dims=["var", "bp"], + coords={"var": ["power", "efficiency"], "bp": [0, 1, 2]}, + ) + + # Should auto-detect linking dim="var" + m.add_piecewise_constraints( + {"power": power, "efficiency": efficiency}, + breakpoints, + dim="bp", + ) + + assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints + + def test_auto_detect_fails_with_no_match(self) -> None: + """Test that auto-detection fails when no dimension matches keys.""" + m = Model() + power = m.add_variables(name="power") + efficiency = m.add_variables(name="efficiency") + + # Dimension 'wrong' doesn't match variable keys + breakpoints = xr.DataArray( + [[0, 50, 100], [0.8, 0.95, 0.9]], + dims=["wrong", "bp"], + coords={"wrong": ["a", "b"], "bp": [0, 1, 2]}, + ) + + with pytest.raises(ValueError, match="Could not auto-detect linking dimension"): + m.add_piecewise_constraints( + {"power": power, "efficiency": efficiency}, + breakpoints, + dim="bp", + ) + + +class TestMasking: + """Tests for masking functionality.""" + + def test_nan_masking(self) -> None: + """Test that NaN values in breakpoints create masked constraints.""" + m = Model() + x = m.add_variables(name="x") + + # Third breakpoint is NaN + breakpoints = xr.DataArray( + [0, 10, np.nan, 100], + dims=["bp"], + coords={"bp": [0, 1, 2, 3]}, + ) + + m.add_piecewise_constraints(x, breakpoints, dim="bp") + + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + # Non-NaN breakpoints (0, 1, 3) should have valid labels + assert int(lambda_var.labels.sel(bp=0)) != -1 + assert int(lambda_var.labels.sel(bp=1)) != -1 + assert int(lambda_var.labels.sel(bp=3)) != -1 + # NaN breakpoint (2) should be masked + assert int(lambda_var.labels.sel(bp=2)) == -1 + + def test_explicit_mask(self) -> None: + """Test user-provided mask.""" + m = Model() + generators = pd.Index(["gen1", "gen2"], name="generator") + x = 
m.add_variables(coords=[generators], name="x") + + breakpoints = xr.DataArray( + [[0, 50, 100], [0, 30, 80]], + dims=["generator", "bp"], + coords={"generator": generators, "bp": [0, 1, 2]}, + ) + + # Mask out gen2 + mask = xr.DataArray( + [[True, True, True], [False, False, False]], + dims=["generator", "bp"], + coords={"generator": generators, "bp": [0, 1, 2]}, + ) + + m.add_piecewise_constraints(x, breakpoints, dim="bp", mask=mask) + + # Should still create variables and constraints + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + + def test_skip_nan_check(self) -> None: + """Test skip_nan_check parameter for performance.""" + m = Model() + x = m.add_variables(name="x") + + # Breakpoints with no NaNs + breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) + + # Should work with skip_nan_check=True + m.add_piecewise_constraints(x, breakpoints, dim="bp", skip_nan_check=True) + + # All lambda variables should be valid (no masking) + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert (lambda_var.labels != -1).all() + + def test_dict_mask_without_linking_dim(self) -> None: + """Test dict case accepts broadcastable mask without linking dimension.""" + m = Model() + power = m.add_variables(name="power") + efficiency = m.add_variables(name="efficiency") + + breakpoints = xr.DataArray( + [[0, 50, 100], [0.8, 0.95, 0.9]], + dims=["var", "bp"], + coords={"var": ["power", "efficiency"], "bp": [0, 1, 2]}, + ) + + # Mask over bp only; should broadcast across var + mask = xr.DataArray([True, False, True], dims=["bp"], coords={"bp": [0, 1, 2]}) + + m.add_piecewise_constraints( + {"power": power, "efficiency": efficiency}, + breakpoints, + dim="bp", + mask=mask, + ) + + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert (lambda_var.labels.sel(bp=0) != -1).all() + assert (lambda_var.labels.sel(bp=1) == -1).all() + assert (lambda_var.labels.sel(bp=2) != -1).all() + + +class TestMultiDimensional: + """Tests for multi-dimensional 
piecewise constraints.""" + + def test_multi_dimensional(self) -> None: + """Test piecewise constraint with multiple loop dimensions.""" + m = Model() + generators = pd.Index(["gen1", "gen2"], name="generator") + timesteps = pd.Index([0, 1, 2], name="time") + x = m.add_variables(coords=[generators, timesteps], name="x") + + rng = np.random.default_rng(42) + breakpoints = xr.DataArray( + rng.random((2, 3, 4)) * 100, + dims=["generator", "time", "bp"], + coords={"generator": generators, "time": timesteps, "bp": [0, 1, 2, 3]}, + ) + + m.add_piecewise_constraints(x, breakpoints, dim="bp") + + # Lambda should have all dimensions + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert "generator" in lambda_var.dims + assert "time" in lambda_var.dims + assert "bp" in lambda_var.dims + + +class TestValidationErrors: + """Tests for input validation.""" + + def test_invalid_vars_type(self) -> None: + """Test error when expr is not Variable, LinearExpression, or dict.""" + m = Model() + + breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) + + with pytest.raises( + TypeError, match="must be a Variable, LinearExpression, or dict" + ): + m.add_piecewise_constraints("invalid", breakpoints, dim="bp") # type: ignore + + def test_invalid_dict_value_type(self) -> None: + m = Model() + bp = xr.DataArray( + [[0, 50], [0, 10]], + dims=["var", "bp"], + coords={"var": ["x", "y"], "bp": [0, 1]}, + ) + with pytest.raises(TypeError, match="dict value for key 'x'"): + m.add_piecewise_constraints({"x": "bad", "y": "bad"}, bp, dim="bp") # type: ignore + + def test_missing_dim(self) -> None: + """Test error when breakpoints don't have the required dim.""" + m = Model() + x = m.add_variables(name="x") + + breakpoints = xr.DataArray([0, 10, 50], dims=["wrong"]) + + with pytest.raises(ValueError, match="must have dimension"): + m.add_piecewise_constraints(x, breakpoints, dim="bp") + + def test_non_numeric_dim(self) -> None: + """Test error when dim coordinates 
are not numeric.""" + m = Model() + x = m.add_variables(name="x") + + breakpoints = xr.DataArray( + [0, 10, 50], + dims=["bp"], + coords={"bp": ["a", "b", "c"]}, # Non-numeric + ) + + with pytest.raises(ValueError, match="numeric coordinates"): + m.add_piecewise_constraints(x, breakpoints, dim="bp") + + def test_expression_support(self) -> None: + """Test that LinearExpression is supported as input.""" + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + + breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) + + # Should work with a LinearExpression + m.add_piecewise_constraints(x + y, breakpoints, dim="bp") + + # Check constraints were created + assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints + + def test_no_matching_linking_dim(self) -> None: + """Test error when no breakpoints dimension matches dict keys.""" + m = Model() + power = m.add_variables(name="power") + efficiency = m.add_variables(name="efficiency") + + breakpoints = xr.DataArray([0, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2]}) + + with pytest.raises(ValueError, match="Could not auto-detect linking dimension"): + m.add_piecewise_constraints( + {"power": power, "efficiency": efficiency}, + breakpoints, + dim="bp", + ) + + def test_linking_dim_coords_mismatch(self) -> None: + """Test error when breakpoint dimension coords don't match dict keys.""" + m = Model() + power = m.add_variables(name="power") + efficiency = m.add_variables(name="efficiency") + + breakpoints = xr.DataArray( + [[0, 50, 100], [0.8, 0.95, 0.9]], + dims=["var", "bp"], + coords={"var": ["wrong1", "wrong2"], "bp": [0, 1, 2]}, + ) + + with pytest.raises(ValueError, match="Could not auto-detect linking dimension"): + m.add_piecewise_constraints( + {"power": power, "efficiency": efficiency}, + breakpoints, + dim="bp", + ) + + +class TestNameGeneration: + """Tests for automatic name generation.""" + + def test_auto_name_generation(self) -> None: + """Test that names are 
auto-generated correctly.""" + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + + bp1 = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) + bp2 = xr.DataArray([0, 20, 80], dims=["bp"], coords={"bp": [0, 1, 2]}) + + m.add_piecewise_constraints(x, bp1, dim="bp") + m.add_piecewise_constraints(y, bp2, dim="bp") + + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + assert f"pwl1{PWL_LAMBDA_SUFFIX}" in m.variables + + def test_custom_name(self) -> None: + """Test using a custom name.""" + m = Model() + x = m.add_variables(name="x") + + breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) + + m.add_piecewise_constraints(x, breakpoints, dim="bp", name="my_pwl") + + assert f"my_pwl{PWL_LAMBDA_SUFFIX}" in m.variables + assert f"my_pwl{PWL_CONVEX_SUFFIX}" in m.constraints + assert f"my_pwl{PWL_LINK_SUFFIX}" in m.constraints + + +class TestLPFileOutput: + """Tests for LP file output with piecewise constraints.""" + + def test_piecewise_written_to_lp(self, tmp_path: Path) -> None: + """Test that piecewise constraints are properly written to LP file.""" + m = Model() + x = m.add_variables(name="x") + + breakpoints = xr.DataArray( + [0.0, 10.0, 50.0], + dims=["bp"], + coords={"bp": [0, 1, 2]}, + ) + + m.add_piecewise_constraints(x, breakpoints, dim="bp") + + # Add a simple objective to make it a valid LP + m.add_objective(x) + + fn = tmp_path / "pwl.lp" + m.to_file(fn, io_api="lp") + content = fn.read_text() + + # Should contain SOS2 section + assert "\nsos\n" in content.lower() + assert "s2" in content.lower() + + +@pytest.mark.skipif("gurobi" not in available_solvers, reason="Gurobi not installed") +class TestSolverIntegration: + """Integration tests with Gurobi solver.""" + + def test_solve_single_variable(self) -> None: + """Test solving a model with piecewise constraint.""" + gurobipy = pytest.importorskip("gurobipy") + + m = Model() + # Variable that should be between 0 and 100 + x = 
m.add_variables(lower=0, upper=100, name="x") + + # Piecewise linear cost function: cost = f(x) + # f(0) = 0, f(50) = 10, f(100) = 50 + cost = m.add_variables(name="cost") + + breakpoints = xr.DataArray( + [[0, 50, 100], [0, 10, 50]], + dims=["var", "bp"], + coords={"var": ["x", "cost"], "bp": [0, 1, 2]}, + ) + + m.add_piecewise_constraints({"x": x, "cost": cost}, breakpoints, dim="bp") + + # Minimize cost, but need x >= 50 to make it interesting + m.add_constraints(x >= 50, name="x_min") + m.add_objective(cost) + + try: + status, cond = m.solve(solver_name="gurobi", io_api="direct") + except gurobipy.GurobiError as exc: + pytest.skip(f"Gurobi environment unavailable: {exc}") + + assert status == "ok" + # At x=50, cost should be 10 + assert np.isclose(x.solution.values, 50, atol=1e-5) + assert np.isclose(cost.solution.values, 10, atol=1e-5) + + def test_solve_efficiency_curve(self) -> None: + """Test solving with a realistic efficiency curve.""" + gurobipy = pytest.importorskip("gurobipy") + + m = Model() + power = m.add_variables(lower=0, upper=100, name="power") + efficiency = m.add_variables(name="efficiency") + + # Efficiency curve: starts low, peaks, then decreases + # power: 0 25 50 75 100 + # efficiency: 0.7 0.85 0.95 0.9 0.8 + breakpoints = xr.DataArray( + [[0, 25, 50, 75, 100], [0.7, 0.85, 0.95, 0.9, 0.8]], + dims=["var", "bp"], + coords={"var": ["power", "efficiency"], "bp": [0, 1, 2, 3, 4]}, + ) + + m.add_piecewise_constraints( + {"power": power, "efficiency": efficiency}, + breakpoints, + dim="bp", + ) + + # Maximize efficiency + m.add_objective(efficiency, sense="max") + + try: + status, cond = m.solve(solver_name="gurobi", io_api="direct") + except gurobipy.GurobiError as exc: + pytest.skip(f"Gurobi environment unavailable: {exc}") + + assert status == "ok" + # Maximum efficiency is at power=50 + assert np.isclose(power.solution.values, 50, atol=1e-5) + assert np.isclose(efficiency.solution.values, 0.95, atol=1e-5) + + def 
class TestIncrementalFormulation:
    """Tests covering the incremental (delta) piecewise-linear formulation."""

    def test_single_variable_incremental(self) -> None:
        """A single variable yields delta vars plus fill and link constraints."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [0, 10, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2, 3]}
        )

        model.add_piecewise_constraints(x, bps, dim="bp", method="incremental")

        # Delta variables must exist: 4 breakpoints -> 3 segments.
        assert f"pwl0{PWL_DELTA_SUFFIX}" in model.variables
        delta = model.variables[f"pwl0{PWL_DELTA_SUFFIX}"]
        assert "bp_seg" in delta.dims
        assert len(delta.coords["bp_seg"]) == 3

        # Single vectorized filling-order constraint.
        assert f"pwl0{PWL_FILL_SUFFIX}" in model.constraints

        # Link constraint ties x to the deltas.
        assert f"pwl0{PWL_LINK_SUFFIX}" in model.constraints

        # The incremental form uses no SOS2/lambda machinery.
        assert f"pwl0{PWL_LAMBDA_SUFFIX}" not in model.variables

    def test_two_breakpoints_incremental(self) -> None:
        """Two breakpoints give one segment and no filling constraints."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray([0, 100], dims=["bp"], coords={"bp": [0, 1]})

        model.add_piecewise_constraints(x, bps, dim="bp", method="incremental")

        # One segment -> one delta variable, no filling constraints needed.
        delta = model.variables[f"pwl0{PWL_DELTA_SUFFIX}"]
        assert len(delta.coords["bp_seg"]) == 1

        assert f"pwl0{PWL_LINK_SUFFIX}" in model.constraints

    def test_dict_incremental(self) -> None:
        """A dict of variables with monotonic curves builds the formulation."""
        model = Model()
        power = model.add_variables(name="power")
        cost = model.add_variables(name="cost")

        # Both curves are strictly increasing along "bp".
        bps = xr.DataArray(
            [[0, 50, 100], [0, 10, 50]],
            dims=["var", "bp"],
            coords={"var": ["power", "cost"], "bp": [0, 1, 2]},
        )

        model.add_piecewise_constraints(
            {"power": power, "cost": cost},
            bps,
            dim="bp",
            method="incremental",
        )

        assert f"pwl0{PWL_DELTA_SUFFIX}" in model.variables
        assert f"pwl0{PWL_LINK_SUFFIX}" in model.constraints

    def test_non_monotonic_raises(self) -> None:
        """Non-monotonic breakpoints are rejected for the incremental method."""
        model = Model()
        x = model.add_variables(name="x")

        # [0, 50, 30] is not monotonic.
        bps = xr.DataArray([0, 50, 30], dims=["bp"], coords={"bp": [0, 1, 2]})

        with pytest.raises(ValueError, match="strictly monotonic"):
            model.add_piecewise_constraints(x, bps, dim="bp", method="incremental")

    def test_decreasing_monotonic_works(self) -> None:
        """Strictly decreasing breakpoints are accepted as monotonic."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [100, 50, 10, 0], dims=["bp"], coords={"bp": [0, 1, 2, 3]}
        )

        model.add_piecewise_constraints(x, bps, dim="bp", method="incremental")
        assert f"pwl0{PWL_DELTA_SUFFIX}" in model.variables

    def test_opposite_directions_in_dict(self) -> None:
        """Curves may be monotonic in opposite directions within one dict."""
        model = Model()
        power = model.add_variables(name="power")
        eff = model.add_variables(name="eff")

        # power rises while efficiency falls.
        bps = xr.DataArray(
            [[0, 50, 100], [0.95, 0.9, 0.8]],
            dims=["var", "bp"],
            coords={"var": ["power", "eff"], "bp": [0, 1, 2]},
        )

        model.add_piecewise_constraints(
            {"power": power, "eff": eff},
            bps,
            dim="bp",
            method="incremental",
        )

        assert f"pwl0{PWL_DELTA_SUFFIX}" in model.variables
        assert f"pwl0{PWL_LINK_SUFFIX}" in model.constraints

    def test_nan_breakpoints_monotonic(self) -> None:
        """Trailing NaN breakpoints must not defeat the monotonicity check."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [0, 10, 100, np.nan], dims=["bp"], coords={"bp": [0, 1, 2, 3]}
        )

        model.add_piecewise_constraints(x, bps, dim="bp", method="auto")
        assert f"pwl0{PWL_DELTA_SUFFIX}" in model.variables

    def test_auto_selects_incremental(self) -> None:
        """method='auto' picks incremental when breakpoints are monotonic."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [0, 10, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2, 3]}
        )

        model.add_piecewise_constraints(x, bps, dim="bp", method="auto")

        # Delta vars present, lambda vars absent -> incremental was chosen.
        assert f"pwl0{PWL_DELTA_SUFFIX}" in model.variables
        assert f"pwl0{PWL_LAMBDA_SUFFIX}" not in model.variables

    def test_auto_selects_sos2(self) -> None:
        """method='auto' falls back to sos2 for non-monotonic breakpoints."""
        model = Model()
        x = model.add_variables(name="x")

        # For a single expression the breakpoints along dim are [0, 50, 30],
        # which is not monotonic.
        bps = xr.DataArray([0, 50, 30], dims=["bp"], coords={"bp": [0, 1, 2]})

        model.add_piecewise_constraints(x, bps, dim="bp", method="auto")

        # Lambda vars present, delta vars absent -> sos2 was chosen.
        assert f"pwl0{PWL_LAMBDA_SUFFIX}" in model.variables
        assert f"pwl0{PWL_DELTA_SUFFIX}" not in model.variables

    def test_invalid_method_raises(self) -> None:
        """An unknown method string raises ValueError."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]})

        with pytest.raises(ValueError, match="method must be"):
            model.add_piecewise_constraints(x, bps, dim="bp", method="invalid")  # type: ignore[arg-type]

    def test_incremental_with_coords(self) -> None:
        """Extra coordinates propagate onto the delta variables."""
        model = Model()
        generators = pd.Index(["gen1", "gen2"], name="generator")
        x = model.add_variables(coords=[generators], name="x")

        bps = xr.DataArray(
            [[0, 50, 100], [0, 30, 80]],
            dims=["generator", "bp"],
            coords={"generator": generators, "bp": [0, 1, 2]},
        )

        model.add_piecewise_constraints(x, bps, dim="bp", method="incremental")

        delta = model.variables[f"pwl0{PWL_DELTA_SUFFIX}"]
        assert "generator" in delta.dims
        assert "bp_seg" in delta.dims
# ===== Disjunctive Piecewise Linear Constraint Tests =====


class TestDisjunctiveBasicSingleVariable:
    """Disjunctive piecewise constraints on a single variable."""

    def test_two_equal_segments(self) -> None:
        """Two equal-length segments create the full constraint machinery."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 10], [50, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)

        # Segment-selection binaries and the pick-one constraint.
        assert f"pwl0{PWL_BINARY_SUFFIX}" in model.variables
        assert f"pwl0{PWL_SELECT_SUFFIX}" in model.constraints
        # Interpolation lambdas with their convexity constraint.
        assert f"pwl0{PWL_LAMBDA_SUFFIX}" in model.variables
        assert f"pwl0{PWL_CONVEX_SUFFIX}" in model.constraints
        # Link constraint ties x to the breakpoints.
        assert f"pwl0{PWL_LINK_SUFFIX}" in model.constraints
        # Lambdas carry an SOS2 tag along the breakpoint dimension.
        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]
        assert lam.attrs.get("sos_type") == 2
        assert lam.attrs.get("sos_dim") == "breakpoint"

    def test_uneven_segments_with_nan(self) -> None:
        """Segments of different lengths are NaN-padded to a common size."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 5, 10], [50, 100, np.nan]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1, 2]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)

        # Lambda for the NaN breakpoint should be masked.
        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]
        assert "segment" in lam.dims
        assert "breakpoint" in lam.dims

    def test_single_breakpoint_segment(self) -> None:
        """A degenerate point segment (one valid breakpoint) is accepted."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 10], [42, np.nan]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)
        assert f"pwl0{PWL_BINARY_SUFFIX}" in model.variables

    def test_single_variable_with_coords(self) -> None:
        """Coordinates survive on the binary and lambda variables."""
        model = Model()
        generators = pd.Index(["gen1", "gen2"], name="generator")
        x = model.add_variables(coords=[generators], name="x")

        bps = xr.DataArray(
            [
                [[0, 10], [50, 100]],
                [[0, 20], [60, 90]],
            ],
            dims=["generator", "segment", "breakpoint"],
            coords={
                "generator": generators,
                "segment": [0, 1],
                "breakpoint": [0, 1],
            },
        )

        model.add_disjunctive_piecewise_constraints(x, bps)

        binv = model.variables[f"pwl0{PWL_BINARY_SUFFIX}"]
        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]

        # Generator coordinates are preserved on both helper variables.
        assert list(binv.coords["generator"].values) == ["gen1", "gen2"]
        assert list(lam.coords["generator"].values) == ["gen1", "gen2"]

        # Binary spans (generator, segment); lambda adds the breakpoint dim.
        assert set(binv.dims) == {"generator", "segment"}
        assert set(lam.dims) == {"generator", "segment", "breakpoint"}

    def test_return_value_is_selection_constraint(self) -> None:
        """The call returns the segment-selection constraint."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 10], [50, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        result = model.add_disjunctive_piecewise_constraints(x, bps)

        assert result is not None
        select_name = f"pwl0{PWL_SELECT_SUFFIX}"
        assert select_name in model.constraints


class TestDisjunctiveDictOfVariables:
    """Disjunctive constraints over a dict of linked variables."""

    def test_dict_with_two_segments(self) -> None:
        """Two variables linked over two segments build binaries and links."""
        model = Model()
        power = model.add_variables(name="power")
        cost = model.add_variables(name="cost")

        bps = xr.DataArray(
            [[[0, 50], [0, 10]], [[80, 100], [20, 50]]],
            dims=["segment", "var", "breakpoint"],
            coords={
                "segment": [0, 1],
                "var": ["power", "cost"],
                "breakpoint": [0, 1],
            },
        )

        model.add_disjunctive_piecewise_constraints(
            {"power": power, "cost": cost},
            bps,
        )

        assert f"pwl0{PWL_BINARY_SUFFIX}" in model.variables
        assert f"pwl0{PWL_LINK_SUFFIX}" in model.constraints

    def test_auto_detect_linking_dim_with_segment_dim(self) -> None:
        """The linking dimension is auto-detected even with segment_dim present."""
        model = Model()
        power = model.add_variables(name="power")
        cost = model.add_variables(name="cost")

        bps = xr.DataArray(
            [[[0, 50], [0, 10]], [[80, 100], [20, 50]]],
            dims=["segment", "var", "breakpoint"],
            coords={
                "segment": [0, 1],
                "var": ["power", "cost"],
                "breakpoint": [0, 1],
            },
        )

        # "var" (not "segment") must be detected as the linking dimension.
        model.add_disjunctive_piecewise_constraints(
            {"power": power, "cost": cost},
            bps,
        )

        assert f"pwl0{PWL_LINK_SUFFIX}" in model.constraints
class TestDisjunctiveExtraDimensions:
    """Extra (broadcast) dimensions on disjunctive constraints."""

    def test_extra_generator_dimension(self) -> None:
        """An extra generator dimension appears on binaries and lambdas."""
        model = Model()
        generators = pd.Index(["gen1", "gen2"], name="generator")
        x = model.add_variables(coords=[generators], name="x")

        bps = xr.DataArray(
            [
                [[0, 10], [50, 100]],
                [[0, 20], [60, 90]],
            ],
            dims=["generator", "segment", "breakpoint"],
            coords={
                "generator": generators,
                "segment": [0, 1],
                "breakpoint": [0, 1],
            },
        )

        model.add_disjunctive_piecewise_constraints(x, bps)

        binv = model.variables[f"pwl0{PWL_BINARY_SUFFIX}"]
        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]
        # Both helper variables carry generator and segment dims.
        for helper in (binv, lam):
            assert "generator" in helper.dims
            assert "segment" in helper.dims

    def test_multi_dimensional_generator_time(self) -> None:
        """Generator plus time coordinates all survive on the helper vars."""
        model = Model()
        generators = pd.Index(["gen1", "gen2"], name="generator")
        timesteps = pd.Index([0, 1, 2], name="time")
        x = model.add_variables(coords=[generators, timesteps], name="x")

        rng = np.random.default_rng(42)
        bp_data = rng.random((2, 3, 2, 2)) * 100
        # Keep breakpoints ordered within every segment.
        bp_data = np.sort(bp_data, axis=-1)

        bps = xr.DataArray(
            bp_data,
            dims=["generator", "time", "segment", "breakpoint"],
            coords={
                "generator": generators,
                "time": timesteps,
                "segment": [0, 1],
                "breakpoint": [0, 1],
            },
        )

        model.add_disjunctive_piecewise_constraints(x, bps)

        binv = model.variables[f"pwl0{PWL_BINARY_SUFFIX}"]
        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]

        # Every extra dimension must be present on the helper vars.
        for dim_name in ["generator", "time", "segment"]:
            assert dim_name in binv.dims
        for dim_name in ["generator", "time", "segment", "breakpoint"]:
            assert dim_name in lam.dims

    def test_dict_with_additional_coords(self) -> None:
        """The linking dim is dropped from binaries/lambdas; extras remain."""
        model = Model()
        generators = pd.Index(["gen1", "gen2"], name="generator")
        power = model.add_variables(coords=[generators], name="power")
        cost = model.add_variables(coords=[generators], name="cost")

        bps = xr.DataArray(
            [
                [[[0, 50], [0, 10]], [[80, 100], [20, 30]]],
                [[[0, 40], [0, 8]], [[70, 90], [15, 25]]],
            ],
            dims=["generator", "segment", "var", "breakpoint"],
            coords={
                "generator": generators,
                "segment": [0, 1],
                "var": ["power", "cost"],
                "breakpoint": [0, 1],
            },
        )

        model.add_disjunctive_piecewise_constraints(
            {"power": power, "cost": cost},
            bps,
        )

        binv = model.variables[f"pwl0{PWL_BINARY_SUFFIX}"]
        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]

        # The "var" linking dimension must not leak into the helper vars.
        assert "var" not in binv.dims
        assert "var" not in lam.dims

        # The broadcast generator dimension must remain.
        assert "generator" in binv.dims
        assert "generator" in lam.dims
class TestDisjunctiveMasking:
    """Masking behaviour (NaN-derived and user-supplied) of disjunctive PWL."""

    def test_nan_masking_labels(self) -> None:
        """NaN breakpoints get label -1 on their lambda entries."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 5, 10], [50, 100, np.nan]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1, 2]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)

        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]
        # Segment 0: every breakpoint is valid.
        assert (lam.labels.sel(segment=0) != -1).all()
        # Segment 1: breakpoint 2 is NaN and therefore masked.
        assert int(lam.labels.sel(segment=1, breakpoint=2)) == -1

        # Each segment keeps at least one valid breakpoint, so no binary is
        # masked.
        binv = model.variables[f"pwl0{PWL_BINARY_SUFFIX}"]
        assert (binv.labels != -1).all()

    def test_nan_masking_partial_segment(self) -> None:
        """A partially NaN segment keeps its binary and its valid lambdas."""
        model = Model()
        x = model.add_variables(name="x")

        # Segment 0: 3 valid breakpoints; segment 1: 2 valid + 1 NaN.
        bps = xr.DataArray(
            [[0, 5, 10], [50, 100, np.nan]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1, 2]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)

        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]
        binv = model.variables[f"pwl0{PWL_BINARY_SUFFIX}"]

        # Two valid breakpoints keep segment 1's binary alive.
        assert int(binv.labels.sel(segment=1)) != -1

        # The valid lambdas of segment 1 (breakpoints 0 and 1) stay unmasked.
        assert int(lam.labels.sel(segment=1, breakpoint=0)) != -1
        assert int(lam.labels.sel(segment=1, breakpoint=1)) != -1

    def test_explicit_mask(self) -> None:
        """A user-provided mask can disable whole segments."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 10], [50, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        # Disable every breakpoint of segment 1.
        mask = xr.DataArray(
            [[True, True], [False, False]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps, mask=mask)

        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]
        binv = model.variables[f"pwl0{PWL_BINARY_SUFFIX}"]

        # Segment 0 untouched, segment 1 fully masked.
        assert (lam.labels.sel(segment=0) != -1).all()
        assert (lam.labels.sel(segment=1) == -1).all()
        # No valid breakpoint left -> segment 1's binary is masked too.
        assert int(binv.labels.sel(segment=1)) == -1

    def test_skip_nan_check(self) -> None:
        """skip_nan_check=True treats every breakpoint as valid."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 5, 10], [50, 100, np.nan]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1, 2]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps, skip_nan_check=True)

        # Nothing may be masked.
        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]
        assert (lam.labels != -1).all()

    def test_dict_mask_without_linking_dim(self) -> None:
        """A mask lacking the linking dimension broadcasts across it."""
        model = Model()
        power = model.add_variables(name="power")
        cost = model.add_variables(name="cost")

        bps = xr.DataArray(
            [[[0, 50], [0, 10]], [[80, 100], [20, 30]]],
            dims=["segment", "var", "breakpoint"],
            coords={
                "segment": [0, 1],
                "var": ["power", "cost"],
                "breakpoint": [0, 1],
            },
        )

        # Mask over segment/breakpoint only; broadcast over "var".
        mask = xr.DataArray(
            [[True, True], [False, False]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(
            {"power": power, "cost": cost},
            bps,
            mask=mask,
        )

        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]
        assert (lam.labels.sel(segment=0) != -1).all()
        assert (lam.labels.sel(segment=1) == -1).all()
class TestDisjunctiveValidationErrors:
    """Input validation for add_disjunctive_piecewise_constraints."""

    def test_missing_dim(self) -> None:
        """Breakpoints lacking the breakpoint dimension are rejected."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 10], [50, 100]],
            dims=["segment", "wrong"],
            coords={"segment": [0, 1], "wrong": [0, 1]},
        )

        with pytest.raises(ValueError, match="must have dimension"):
            model.add_disjunctive_piecewise_constraints(x, bps, dim="breakpoint")

    def test_missing_segment_dim(self) -> None:
        """Breakpoints lacking the segment dimension are rejected."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [0, 10, 50],
            dims=["breakpoint"],
            coords={"breakpoint": [0, 1, 2]},
        )

        with pytest.raises(ValueError, match="must have dimension"):
            model.add_disjunctive_piecewise_constraints(x, bps)

    def test_same_dim_segment_dim(self) -> None:
        """dim and segment_dim must not coincide."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 10], [50, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        with pytest.raises(ValueError, match="must be different"):
            model.add_disjunctive_piecewise_constraints(
                x, bps, dim="segment", segment_dim="segment"
            )

    def test_non_numeric_coords(self) -> None:
        """Non-numeric breakpoint coordinates are rejected."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 10], [50, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": ["a", "b"]},
        )

        with pytest.raises(ValueError, match="numeric coordinates"):
            model.add_disjunctive_piecewise_constraints(x, bps)

    def test_invalid_expr(self) -> None:
        """Anything but Variable/LinearExpression/dict raises TypeError."""
        model = Model()

        bps = xr.DataArray(
            [[0, 10], [50, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        with pytest.raises(
            TypeError, match="must be a Variable, LinearExpression, or dict"
        ):
            model.add_disjunctive_piecewise_constraints("invalid", bps)  # type: ignore

    def test_expression_support(self) -> None:
        """A LinearExpression such as x + y is a valid input."""
        model = Model()
        x = model.add_variables(name="x")
        y = model.add_variables(name="y")

        bps = xr.DataArray(
            [[0, 10], [50, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(x + y, bps)

        assert f"pwl0{PWL_BINARY_SUFFIX}" in model.variables
        assert f"pwl0{PWL_LAMBDA_SUFFIX}" in model.variables
        assert f"pwl0{PWL_LINK_SUFFIX}" in model.constraints

    def test_no_matching_linking_dim(self) -> None:
        """No breakpoint dimension matching the dict keys raises ValueError."""
        model = Model()
        power = model.add_variables(name="power")
        cost = model.add_variables(name="cost")

        bps = xr.DataArray(
            [[0, 50], [80, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        with pytest.raises(ValueError, match="Could not auto-detect linking dimension"):
            model.add_disjunctive_piecewise_constraints(
                {"power": power, "cost": cost},
                bps,
            )

    def test_linking_dim_coords_mismatch(self) -> None:
        """Linking-dim coordinates must equal the dict keys."""
        model = Model()
        power = model.add_variables(name="power")
        cost = model.add_variables(name="cost")

        bps = xr.DataArray(
            [[[0, 50], [0, 10]], [[80, 100], [20, 30]]],
            dims=["segment", "var", "breakpoint"],
            coords={
                "segment": [0, 1],
                "var": ["wrong1", "wrong2"],
                "breakpoint": [0, 1],
            },
        )

        with pytest.raises(ValueError, match="Could not auto-detect linking dimension"):
            model.add_disjunctive_piecewise_constraints(
                {"power": power, "cost": cost},
                bps,
            )
class TestDisjunctiveNameGeneration:
    """Naming behaviour of disjunctive piecewise constraints."""

    def test_shared_counter_with_continuous(self) -> None:
        """Continuous and disjunctive PWL draw from the same counter."""
        model = Model()
        x = model.add_variables(name="x")
        y = model.add_variables(name="y")

        bp_continuous = xr.DataArray(
            [0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}
        )
        model.add_piecewise_constraints(x, bp_continuous, dim="bp")

        bp_disjunctive = xr.DataArray(
            [[0, 10], [50, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )
        model.add_disjunctive_piecewise_constraints(y, bp_disjunctive)

        # First call claims pwl0, second claims pwl1.
        assert f"pwl0{PWL_LAMBDA_SUFFIX}" in model.variables
        assert f"pwl1{PWL_BINARY_SUFFIX}" in model.variables

    def test_custom_name(self) -> None:
        """A user-supplied name prefixes every generated element."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0, 10], [50, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps, name="my_dpwl")

        assert f"my_dpwl{PWL_BINARY_SUFFIX}" in model.variables
        assert f"my_dpwl{PWL_SELECT_SUFFIX}" in model.constraints
        assert f"my_dpwl{PWL_LAMBDA_SUFFIX}" in model.variables
        assert f"my_dpwl{PWL_CONVEX_SUFFIX}" in model.constraints
        assert f"my_dpwl{PWL_LINK_SUFFIX}" in model.constraints


class TestDisjunctiveLPFileOutput:
    """LP-file serialization of disjunctive piecewise constraints."""

    def test_lp_contains_sos2_and_binary(self, tmp_path: Path) -> None:
        """The LP file carries both an SOS2 section and binary variables."""
        model = Model()
        x = model.add_variables(name="x")

        bps = xr.DataArray(
            [[0.0, 10.0], [50.0, 100.0]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)
        model.add_objective(x)

        fn = tmp_path / "dpwl.lp"
        model.to_file(fn, io_api="lp")
        content = fn.read_text()

        # The SOS2 section must be written.
        assert "\nsos\n" in content.lower()
        assert "s2" in content.lower()

        # The segment binaries must land in a binary section.
        assert "binary" in content.lower() or "binaries" in content.lower()


class TestDisjunctiveMultiBreakpointSegments:
    """Segments with several breakpoints (unique to the disjunctive form)."""

    def test_three_breakpoints_per_segment(self) -> None:
        """Two segments of three breakpoints yield a (2, 3) lambda array."""
        model = Model()
        x = model.add_variables(name="x")

        # 2 segments, each with 3 breakpoints.
        bps = xr.DataArray(
            [[0, 5, 10], [50, 75, 100]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1, 2]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)

        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]
        assert lam.labels.sizes["segment"] == 2
        assert lam.labels.sizes["breakpoint"] == 3
        # No NaN anywhere, so nothing is masked.
        assert (lam.labels != -1).all()

    def test_mixed_segment_lengths_nan_padding(self) -> None:
        """A 4-breakpoint segment next to a NaN-padded 2-breakpoint one."""
        model = Model()
        x = model.add_variables(name="x")

        # Segment 0: 4 valid breakpoints; segment 1: 2 valid + 2 NaN.
        bps = xr.DataArray(
            [[0, 5, 10, 15], [50, 100, np.nan, np.nan]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1, 2, 3]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)

        lam = model.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"]
        binv = model.variables[f"pwl0{PWL_BINARY_SUFFIX}"]

        # Lambda array spans (2 segments, 4 breakpoints).
        assert lam.labels.sizes["segment"] == 2
        assert lam.labels.sizes["breakpoint"] == 4

        # Segment 0 keeps all four lambdas.
        assert (lam.labels.sel(segment=0) != -1).all()

        # Segment 1: the two NaN-padded breakpoints are masked.
        assert (lam.labels.sel(segment=1, breakpoint=0) != -1).item()
        assert (lam.labels.sel(segment=1, breakpoint=1) != -1).item()
        assert (lam.labels.sel(segment=1, breakpoint=2) == -1).item()
        assert (lam.labels.sel(segment=1, breakpoint=3) == -1).item()

        # Every segment retains a valid breakpoint, so binaries stay valid.
        assert (binv.labels != -1).all()
_disjunctive_solvers = get_available_solvers_with_feature(
    SolverFeature.SOS_CONSTRAINTS, available_solvers
)


@pytest.mark.skipif(
    len(_disjunctive_solvers) == 0,
    reason="No solver with SOS constraint support installed",
)
class TestDisjunctiveSolverIntegration:
    """End-to-end solver runs for disjunctive piecewise constraints."""

    @pytest.fixture(params=_disjunctive_solvers)
    def solver_name(self, request: pytest.FixtureRequest) -> str:
        return request.param

    def test_minimize_picks_low_segment(self, solver_name: str) -> None:
        """Minimizing x lands at the bottom of the lower segment."""
        model = Model()
        x = model.add_variables(name="x")

        # Two segments: [0, 10] and [50, 100].
        bps = xr.DataArray(
            [[0.0, 10.0], [50.0, 100.0]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)
        model.add_objective(x)

        status, cond = model.solve(solver_name=solver_name)

        assert status == "ok"
        # x = 0 is the minimum of the low segment.
        assert np.isclose(x.solution.values, 0.0, atol=1e-5)

    def test_maximize_picks_high_segment(self, solver_name: str) -> None:
        """Maximizing x lands at the top of the upper segment."""
        model = Model()
        x = model.add_variables(name="x")

        # Two segments: [0, 10] and [50, 100].
        bps = xr.DataArray(
            [[0.0, 10.0], [50.0, 100.0]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)
        model.add_objective(x, sense="max")

        status, cond = model.solve(solver_name=solver_name)

        assert status == "ok"
        # x = 100 is the maximum of the high segment.
        assert np.isclose(x.solution.values, 100.0, atol=1e-5)

    def test_dict_case_solver(self, solver_name: str) -> None:
        """Linked power/cost regions solve to the cheapest region."""
        model = Model()
        power = model.add_variables(name="power")
        cost = model.add_variables(name="cost")

        # Two operating regions:
        # Region 0: power [0, 50], cost [0, 10]
        # Region 1: power [80, 100], cost [20, 30]
        bps = xr.DataArray(
            [[[0.0, 50.0], [0.0, 10.0]], [[80.0, 100.0], [20.0, 30.0]]],
            dims=["segment", "var", "breakpoint"],
            coords={
                "segment": [0, 1],
                "var": ["power", "cost"],
                "breakpoint": [0, 1],
            },
        )

        model.add_disjunctive_piecewise_constraints(
            {"power": power, "cost": cost},
            bps,
        )

        # Minimize cost.
        model.add_objective(cost)

        status, cond = model.solve(solver_name=solver_name)

        assert status == "ok"
        # Region 0 with zero output is the cost optimum.
        assert np.isclose(cost.solution.values, 0.0, atol=1e-5)
        assert np.isclose(power.solution.values, 0.0, atol=1e-5)

    def test_three_segments_min(self, solver_name: str) -> None:
        """With three segments, minimization picks the lowest one."""
        model = Model()
        x = model.add_variables(name="x")

        # Three segments: [0, 10], [30, 50], [80, 100].
        bps = xr.DataArray(
            [[0.0, 10.0], [30.0, 50.0], [80.0, 100.0]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1, 2], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)
        model.add_objective(x)

        status, cond = model.solve(solver_name=solver_name)

        assert status == "ok"
        assert np.isclose(x.solution.values, 0.0, atol=1e-5)

    def test_constrained_mid_segment(self, solver_name: str) -> None:
        """A bound inside a segment forces interpolation to that point."""
        model = Model()
        x = model.add_variables(name="x")

        # Two segments: [0, 10] and [50, 100].
        bps = xr.DataArray(
            [[0.0, 10.0], [50.0, 100.0]],
            dims=["segment", "breakpoint"],
            coords={"segment": [0, 1], "breakpoint": [0, 1]},
        )

        model.add_disjunctive_piecewise_constraints(x, bps)

        # x >= 60 rules out segment 0 entirely.
        model.add_constraints(x >= 60, name="x_lower")
        model.add_objective(x)

        status, cond = model.solve(solver_name=solver_name)

        assert status == "ok"
        # Minimum inside segment 1 subject to x >= 60 is x = 60.
        assert np.isclose(x.solution.values, 60.0, atol=1e-5)

    def test_multi_breakpoint_segment_solver(self, solver_name: str) -> None:
        """Three-breakpoint segments interpolate to the exact curve point."""
        model = Model()
        power = model.add_variables(name="power")
        cost = model.add_variables(name="cost")

        # Both segments have 3 breakpoints (no NaN padding needed):
        # Segment 0: power [0, 50, 100], cost [0, 10, 50]
        # Segment 1: power [200, 250, 300], cost [80, 90, 100]
        bps = xr.DataArray(
            [
                [[0.0, 50.0, 100.0], [0.0, 10.0, 50.0]],
                [[200.0, 250.0, 300.0], [80.0, 90.0, 100.0]],
            ],
            dims=["segment", "var", "breakpoint"],
            coords={
                "segment": [0, 1],
                "var": ["power", "cost"],
                "breakpoint": [0, 1, 2],
            },
        )

        model.add_disjunctive_piecewise_constraints(
            {"power": power, "cost": cost},
            bps,
        )

        # power in [50, 150] while minimizing cost -> segment 0 at power=50,
        # cost=10.
        model.add_constraints(power >= 50, name="power_min")
        model.add_constraints(power <= 150, name="power_max")
        model.add_objective(cost)

        status, cond = model.solve(solver_name=solver_name)

        assert status == "ok"
        assert np.isclose(power.solution.values, 50.0, atol=1e-5)
        assert np.isclose(cost.solution.values, 10.0, atol=1e-5)

    def test_multi_generator_solver(self, solver_name: str) -> None:
        """Two generators with distinct operating regions meet joint demand."""
        model = Model()
        generators = pd.Index(["gen1", "gen2"], name="generator")
        power = model.add_variables(lower=0, coords=[generators], name="power")
        cost = model.add_variables(coords=[generators], name="cost")

        # gen1 regions: power [0, 50] / cost [0, 15]; power [80, 100] / cost [30, 50]
        # gen2 regions: power [0, 60] / cost [0, 10]; power [70, 100] / cost [12, 40]
        bps = xr.DataArray(
            [
                [[[0.0, 50.0], [0.0, 15.0]], [[80.0, 100.0], [30.0, 50.0]]],
                [[[0.0, 60.0], [0.0, 10.0]], [[70.0, 100.0], [12.0, 40.0]]],
            ],
            dims=["generator", "segment", "var", "breakpoint"],
            coords={
                "generator": generators,
                "segment": [0, 1],
                "var": ["power", "cost"],
                "breakpoint": [0, 1],
            },
        )

        model.add_disjunctive_piecewise_constraints(
            {"power": power, "cost": cost},
            bps,
        )

        # Joint demand of at least 100.
        model.add_constraints(power.sum() >= 100, name="demand")
        model.add_objective(cost.sum())

        status, cond = model.solve(solver_name=solver_name)

        assert status == "ok"
        total_power = power.solution.sum().values
        assert total_power >= 100 - 1e-5


_incremental_solvers = [s for s in ["gurobi", "highs"] if s in available_solvers]


@pytest.mark.skipif(
    len(_incremental_solvers) == 0,
    reason="No supported solver (gurobi/highs) installed",
)
class TestIncrementalSolverIntegrationMultiSolver:
    """End-to-end solver runs for the incremental formulation."""

    @pytest.fixture(params=_incremental_solvers)
    def solver_name(self, request: pytest.FixtureRequest) -> str:
        return request.param

    def test_solve_incremental_single(self, solver_name: str) -> None:
        model = Model()
        x = model.add_variables(lower=0, upper=100, name="x")
        cost = model.add_variables(name="cost")

        bps = xr.DataArray(
            [[0, 50, 100], [0, 10, 50]],
            dims=["var", "bp"],
            coords={"var": ["x", "cost"], "bp": [0, 1, 2]},
        )

        model.add_piecewise_constraints(
            {"x": x, "cost": cost},
            bps,
            dim="bp",
            method="incremental",
        )

        model.add_constraints(x >= 50, name="x_min")
        model.add_objective(cost)

        status, cond = model.solve(solver_name=solver_name)

        assert status == "ok"
        assert np.isclose(x.solution.values, 50, atol=1e-5)
        assert np.isclose(cost.solution.values, 10, atol=1e-5)
@pytest.mark.skipif(
    len(_incremental_solvers) == 0,
    reason="No supported solver (gurobi/highs) installed",
)
class TestIncrementalDecreasingBreakpointsSolver:
    """Solver test for incremental formulation with decreasing breakpoints.

    Guarded by the same skipif as TestIncrementalSolverIntegrationMultiSolver
    so collection does not depend on pytest's empty-parameter-set behaviour
    when no supported solver is installed.
    """

    @pytest.fixture(params=_incremental_solvers)
    def solver_name(self, request: pytest.FixtureRequest) -> str:
        return request.param

    def test_decreasing_breakpoints_solver(self, solver_name: str) -> None:
        """Decreasing x/cost curves still interpolate to the right point."""
        m = Model()
        x = m.add_variables(lower=0, upper=100, name="x")
        cost = m.add_variables(name="cost")

        # Both curves strictly decreasing along "bp".
        breakpoints = xr.DataArray(
            [[100, 50, 0], [50, 10, 0]],
            dims=["var", "bp"],
            coords={"var": ["x", "cost"], "bp": [0, 1, 2]},
        )

        m.add_piecewise_constraints(
            {"x": x, "cost": cost},
            breakpoints,
            dim="bp",
            method="incremental",
        )

        m.add_constraints(x >= 50, name="x_min")
        m.add_objective(cost)

        status, cond = m.solve(solver_name=solver_name)

        assert status == "ok"
        # x = 50 sits exactly on the middle breakpoint, so cost = 10.
        assert np.isclose(x.solution.values, 50, atol=1e-5)
        assert np.isclose(cost.solution.values, 10, atol=1e-5)


class TestIncrementalNonMonotonicDictRaises:
    """Non-monotonic breakpoints in a dict must raise ValueError."""

    def test_non_monotonic_in_dict_raises(self) -> None:
        """One non-monotonic slice poisons the whole incremental request."""
        m = Model()
        x = m.add_variables(name="x")
        y = m.add_variables(name="y")

        # y's slice [0, 30, 10] is not monotonic.
        breakpoints = xr.DataArray(
            [[0, 50, 100], [0, 30, 10]],
            dims=["var", "bp"],
            coords={"var": ["x", "y"], "bp": [0, 1, 2]},
        )

        with pytest.raises(ValueError, match="strictly monotonic"):
            m.add_piecewise_constraints(
                {"x": x, "y": y},
                breakpoints,
                dim="bp",
                method="incremental",
            )


class TestAdditionalEdgeCases:
    """Additional edge case tests identified in review."""

    def test_nan_breakpoints_delta_mask(self) -> None:
        """Delta vars for segments adjacent to trailing NaN breakpoints are masked."""
        m = Model()
        x = m.add_variables(name="x")

        breakpoints = xr.DataArray(
            [0, 10, np.nan, np.nan], dims=["bp"], coords={"bp": [0, 1, 2, 3]}
        )

        m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental")

        delta_var = m.variables[f"pwl0{PWL_DELTA_SUFFIX}"]
        # Only the first segment (between the two valid breakpoints) survives.
        assert delta_var.labels.sel(bp_seg=0).values != -1
        assert delta_var.labels.sel(bp_seg=1).values == -1
        assert delta_var.labels.sel(bp_seg=2).values == -1

    def test_dict_with_linear_expressions(self) -> None:
        """LinearExpression values (not just Variables) work in the dict."""
        m = Model()
        x = m.add_variables(name="x")
        y = m.add_variables(name="y")

        breakpoints = xr.DataArray(
            [[0, 50, 100], [0, 10, 50]],
            dims=["var", "bp"],
            coords={"var": ["expr_a", "expr_b"], "bp": [0, 1, 2]},
        )

        m.add_piecewise_constraints(
            {"expr_a": 2 * x, "expr_b": 3 * y},
            breakpoints,
            dim="bp",
        )

        assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables
        assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints

    def test_pwl_counter_increments(self) -> None:
        """The _pwlCounter increments per call and yields unique names."""
        m = Model()
        x = m.add_variables(name="x")
        y = m.add_variables(name="y")
        breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]})

        m.add_piecewise_constraints(x, breakpoints, dim="bp")
        assert m._pwlCounter == 1

        m.add_piecewise_constraints(y, breakpoints, dim="bp")
        assert m._pwlCounter == 2
        assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables
        assert f"pwl1{PWL_LAMBDA_SUFFIX}" in m.variables

    def test_auto_with_mixed_monotonicity_dict(self) -> None:
        """method='auto' uses incremental for opposite-direction dict slices."""
        m = Model()
        power = m.add_variables(name="power")
        eff = m.add_variables(name="eff")

        breakpoints = xr.DataArray(
            [[0, 50, 100], [0.95, 0.9, 0.8]],
            dims=["var", "bp"],
            coords={"var": ["power", "eff"], "bp": [0, 1, 2]},
        )

        m.add_piecewise_constraints(
            {"power": power, "eff": eff},
            breakpoints,
            dim="bp",
            method="auto",
        )

        # Each slice is monotonic (in its own direction) -> incremental.
        assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables
        assert f"pwl0{PWL_LAMBDA_SUFFIX}" not in m.variables

    def test_custom_segment_dim(self) -> None:
        """A custom segment_dim name ("zone") is honoured."""
        m = Model()
        x = m.add_variables(name="x")

        breakpoints = xr.DataArray(
            [[0.0, 10.0], [50.0, 100.0]],
            dims=["zone", "breakpoint"],
            coords={"zone": [0, 1], "breakpoint": [0, 1]},
        )

        m.add_disjunctive_piecewise_constraints(x, breakpoints, segment_dim="zone")

        assert f"pwl0{PWL_BINARY_SUFFIX}" in m.variables
        assert f"pwl0{PWL_SELECT_SUFFIX}" in m.constraints

    def test_sos2_return_value_is_convexity_constraint(self) -> None:
        """add_piecewise_constraints (SOS2) returns the convexity constraint."""
        m = Model()
        x = m.add_variables(name="x")

        breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]})

        result = m.add_piecewise_constraints(x, breakpoints, dim="bp")
        assert result.name == f"pwl0{PWL_CONVEX_SUFFIX}"

    def test_incremental_lp_no_sos2(self, tmp_path: Path) -> None:
        """The incremental formulation writes no SOS2 section to LP files."""
        m = Model()
        x = m.add_variables(name="x")

        breakpoints = xr.DataArray(
            [0.0, 10.0, 50.0], dims=["bp"], coords={"bp": [0, 1, 2]}
        )

        m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental")
        m.add_objective(x)

        fn = tmp_path / "inc.lp"
        m.to_file(fn, io_api="lp")
        content = fn.read_text()

        assert "\nsos\n" not in content.lower()
        assert "s2" not in content.lower()

    def test_two_breakpoints_no_fill_constraint(self) -> None:
        """Two breakpoints (one segment) need no filling-order constraint."""
        m = Model()
        x = m.add_variables(name="x")

        breakpoints = xr.DataArray([0, 100], dims=["bp"], coords={"bp": [0, 1]})
        m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental")

        assert f"pwl0{PWL_FILL_SUFFIX}" not in m.constraints
        # NOTE(review): the source chunk was truncated after "in" here;
        # `m.constraints` matches every sibling link-constraint assertion —
        # confirm against upstream.
        assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints
m.constraints + + def test_non_trailing_nan_incremental_raises(self) -> None: + """Non-trailing NaN breakpoints raise ValueError with method='incremental'.""" + m = Model() + x = m.add_variables(name="x") + + breakpoints = xr.DataArray( + [0, np.nan, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2, 3]} + ) + + with pytest.raises(ValueError, match="non-trailing NaN"): + m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental") + + def test_non_trailing_nan_incremental_dict_raises(self) -> None: + """Dict case with one variable having non-trailing NaN raises.""" + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + + breakpoints = xr.DataArray( + [[0, 50, np.nan, 100], [0, 10, 50, 80]], + dims=["var", "bp"], + coords={"var": ["x", "y"], "bp": [0, 1, 2, 3]}, + ) + + with pytest.raises(ValueError, match="non-trailing NaN"): + m.add_piecewise_constraints( + {"x": x, "y": y}, + breakpoints, + dim="bp", + method="incremental", + ) + + def test_non_trailing_nan_falls_back_to_sos2(self) -> None: + """method='auto' falls back to SOS2 for non-trailing NaN.""" + m = Model() + x = m.add_variables(name="x") + + breakpoints = xr.DataArray( + [0, np.nan, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2, 3]} + ) + + m.add_piecewise_constraints(x, breakpoints, dim="bp", method="auto") + + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + assert f"pwl0{PWL_DELTA_SUFFIX}" not in m.variables + + +class TestBreakpointsFactory: + def test_positional_list(self) -> None: + bp = breakpoints([0, 50, 100]) + assert bp.dims == ("breakpoint",) + assert list(bp.values) == [0.0, 50.0, 100.0] + assert list(bp.coords["breakpoint"].values) == [0, 1, 2] + + def test_positional_dict(self) -> None: + bp = breakpoints({"gen1": [0, 50, 100], "gen2": [0, 30]}, dim="generator") + assert set(bp.dims) == {"generator", "breakpoint"} + assert bp.sizes["generator"] == 2 + assert bp.sizes["breakpoint"] == 3 + assert np.isnan(bp.sel(generator="gen2", breakpoint=2)) + + def 
test_positional_dict_without_dim_raises(self) -> None: + with pytest.raises(ValueError, match="'dim' is required"): + breakpoints({"gen1": [0, 50], "gen2": [0, 30]}) + + def test_kwargs_uniform(self) -> None: + bp = breakpoints(power=[0, 50, 100], fuel=[10, 20, 30]) + assert "var" in bp.dims + assert "breakpoint" in bp.dims + assert list(bp.coords["var"].values) == ["power", "fuel"] + assert bp.sizes["breakpoint"] == 3 + + def test_kwargs_per_entity(self) -> None: + bp = breakpoints( + power={"gen1": [0, 50, 100], "gen2": [0, 30]}, + cost={"gen1": [0, 10, 50], "gen2": [0, 8]}, + dim="generator", + ) + assert "generator" in bp.dims + assert "var" in bp.dims + assert "breakpoint" in bp.dims + + def test_kwargs_mixed_list_and_dict(self) -> None: + bp = breakpoints( + power={"gen1": [0, 50], "gen2": [0, 30]}, + fuel=[10, 20], + dim="generator", + ) + assert "generator" in bp.dims + assert "var" in bp.dims + assert bp.sel(var="fuel", generator="gen1", breakpoint=0) == 10 + assert bp.sel(var="fuel", generator="gen2", breakpoint=0) == 10 + + def test_kwargs_dataarray_passthrough(self) -> None: + power_da = xr.DataArray([0, 50, 100], dims=["breakpoint"]) + bp = breakpoints(power=power_da, fuel=[10, 20, 30]) + assert "var" in bp.dims + assert bp.sel(var="power", breakpoint=0) == 0 + + def test_both_positional_and_kwargs_raises(self) -> None: + with pytest.raises(ValueError, match="Cannot pass both"): + breakpoints([0, 50], power=[10, 20]) + + def test_neither_raises(self) -> None: + with pytest.raises(ValueError, match="Must pass either"): + breakpoints() + + def test_invalid_values_type_raises(self) -> None: + with pytest.raises(TypeError, match="must be a list or dict"): + breakpoints(42) # type: ignore + + def test_invalid_kwarg_type_raises(self) -> None: + with pytest.raises(ValueError, match="must be a list, dict, or DataArray"): + breakpoints(power=42) # type: ignore + + def test_kwargs_dict_without_dim_raises(self) -> None: + with pytest.raises(ValueError, 
match="'dim' is required"): + breakpoints(power={"gen1": [0, 50]}, cost=[10, 20]) + + def test_factory_output_works_with_piecewise(self) -> None: + m = Model() + x = m.add_variables(name="x") + bp = breakpoints([0, 10, 50]) + m.add_piecewise_constraints(x, bp, dim="breakpoint") + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + + def test_factory_dict_output_works_with_piecewise(self) -> None: + m = Model() + power = m.add_variables(name="power") + cost = m.add_variables(name="cost") + bp = breakpoints(power=[0, 50, 100], cost=[0, 10, 50]) + m.add_piecewise_constraints( + {"power": power, "cost": cost}, bp, dim="breakpoint" + ) + assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints + + +class TestBreakpointsSegments: + def test_list_of_tuples(self) -> None: + bp = breakpoints.segments([(0, 10), (50, 100)]) + assert set(bp.dims) == {"segment", "breakpoint"} + assert bp.sizes["segment"] == 2 + assert bp.sizes["breakpoint"] == 2 + + def test_ragged_segments(self) -> None: + bp = breakpoints.segments([(0, 5, 10), (50, 100)]) + assert bp.sizes["breakpoint"] == 3 + assert np.isnan(bp.sel(segment=1, breakpoint=2)) + + def test_per_entity_dict(self) -> None: + bp = breakpoints.segments( + {"gen1": [(0, 10), (50, 100)], "gen2": [(0, 20), (60, 90)]}, + dim="generator", + ) + assert "generator" in bp.dims + assert "segment" in bp.dims + assert "breakpoint" in bp.dims + + def test_kwargs_multi_variable(self) -> None: + bp = breakpoints.segments( + power=[(0, 50), (80, 100)], + cost=[(0, 10), (20, 30)], + ) + assert "segment" in bp.dims + assert "var" in bp.dims + assert "breakpoint" in bp.dims + + def test_segments_invalid_values_type_raises(self) -> None: + with pytest.raises(TypeError, match="must be a list or dict"): + breakpoints.segments(42) # type: ignore + + def test_segments_both_positional_and_kwargs_raises(self) -> None: + with pytest.raises(ValueError, match="Cannot pass both"): + breakpoints.segments([(0, 10)], power=[(0, 10)]) + + def 
test_segments_neither_raises(self) -> None: + with pytest.raises(ValueError, match="Must pass either"): + breakpoints.segments() + + def test_segments_invalid_kwarg_type_raises(self) -> None: + with pytest.raises(ValueError, match="must be a list, dict, or DataArray"): + breakpoints.segments(power=42) # type: ignore + + def test_segments_kwargs_dict_without_dim_raises(self) -> None: + with pytest.raises(ValueError, match="'dim' is required"): + breakpoints.segments(power={"gen1": [(0, 50)]}, cost=[(10, 20)]) + + def test_segments_dict_without_dim_raises(self) -> None: + with pytest.raises(ValueError, match="'dim' is required"): + breakpoints.segments({"gen1": [(0, 10)], "gen2": [(50, 100)]}) + + def test_segments_works_with_disjunctive(self) -> None: + m = Model() + x = m.add_variables(name="x") + bp = breakpoints.segments([(0, 10), (50, 100)]) + m.add_disjunctive_piecewise_constraints(x, bp) + assert f"pwl0{PWL_BINARY_SUFFIX}" in m.variables + + +class TestAutobroadcast: + def test_1d_breakpoints_2d_variable(self) -> None: + m = Model() + generators = pd.Index(["gen1", "gen2"], name="generator") + x = m.add_variables(coords=[generators], name="x") + bp = breakpoints([0, 10, 50]) + m.add_piecewise_constraints(x, bp, dim="breakpoint") + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert "generator" in lambda_var.dims + assert "breakpoint" in lambda_var.dims + + def test_already_matching_dims_noop(self) -> None: + m = Model() + generators = pd.Index(["gen1", "gen2"], name="generator") + x = m.add_variables(coords=[generators], name="x") + bp = xr.DataArray( + [[0, 50, 100], [0, 30, 80]], + dims=["generator", "bp"], + coords={"generator": generators, "bp": [0, 1, 2]}, + ) + m.add_piecewise_constraints(x, bp, dim="bp") + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert "generator" in lambda_var.dims + + def test_dict_expr_broadcast(self) -> None: + m = Model() + generators = pd.Index(["gen1", "gen2"], name="generator") + power = 
m.add_variables(coords=[generators], name="power") + cost = m.add_variables(coords=[generators], name="cost") + bp = breakpoints(power=[0, 50, 100], cost=[0, 10, 50]) + m.add_piecewise_constraints( + {"power": power, "cost": cost}, bp, dim="breakpoint" + ) + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert "generator" in lambda_var.dims + + def test_disjunctive_broadcast(self) -> None: + m = Model() + generators = pd.Index(["gen1", "gen2"], name="generator") + x = m.add_variables(coords=[generators], name="x") + bp = breakpoints.segments([(0, 10), (50, 100)]) + m.add_disjunctive_piecewise_constraints(x, bp) + binary_var = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] + assert "generator" in binary_var.dims + + def test_broadcast_multi_dim(self) -> None: + m = Model() + generators = pd.Index(["gen1", "gen2"], name="generator") + timesteps = pd.Index([0, 1, 2], name="time") + x = m.add_variables(coords=[generators, timesteps], name="x") + bp = breakpoints([0, 10, 50]) + m.add_piecewise_constraints(x, bp, dim="breakpoint") + lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert "generator" in lambda_var.dims + assert "time" in lambda_var.dims diff --git a/test/test_solvers.py b/test/test_solvers.py index 7f8b01f2..7f4d55ec 100644 --- a/test/test_solvers.py +++ b/test/test_solvers.py @@ -45,6 +45,18 @@ ENDATA """ +free_lp_problem = """ +Maximize + z: 3 x + 4 y +Subject To + c1: 2 x + y <= 10 + c2: x + 2 y <= 12 +Bounds + 0 <= x + 0 <= y +End +""" + @pytest.mark.parametrize("solver", set(solvers.available_solvers)) def test_free_mps_solution_parsing(solver: str, tmp_path: Path) -> None: @@ -71,6 +83,88 @@ def test_free_mps_solution_parsing(solver: str, tmp_path: Path) -> None: assert result.solution.objective == 30.0 +@pytest.mark.skipif( + "knitro" not in set(solvers.available_solvers), reason="Knitro is not installed" +) +def test_knitro_solver_mps(tmp_path: Path) -> None: + """Test Knitro solver with a simple MPS problem.""" + knitro = solvers.Knitro() 
+ + mps_file = tmp_path / "problem.mps" + mps_file.write_text(free_mps_problem) + sol_file = tmp_path / "solution.sol" + + result = knitro.solve_problem(problem_fn=mps_file, solution_fn=sol_file) + + assert result.status.is_ok + assert result.solution is not None + assert result.solution.objective == 30.0 + + +@pytest.mark.skipif( + "knitro" not in set(solvers.available_solvers), reason="Knitro is not installed" +) +def test_knitro_solver_for_lp(tmp_path: Path) -> None: + """Test Knitro solver with a simple LP problem.""" + knitro = solvers.Knitro() + + lp_file = tmp_path / "problem.lp" + lp_file.write_text(free_lp_problem) + sol_file = tmp_path / "solution.sol" + + result = knitro.solve_problem(problem_fn=lp_file, solution_fn=sol_file) + + assert result.status.is_ok + assert result.solution is not None + assert result.solution.objective == pytest.approx(26.666, abs=1e-3) + + +@pytest.mark.skipif( + "knitro" not in set(solvers.available_solvers), reason="Knitro is not installed" +) +def test_knitro_solver_with_options(tmp_path: Path) -> None: + """Test Knitro solver with custom options.""" + knitro = solvers.Knitro(maxit=100, feastol=1e-6) + + mps_file = tmp_path / "problem.mps" + mps_file.write_text(free_mps_problem) + sol_file = tmp_path / "solution.sol" + log_file = tmp_path / "knitro.log" + + result = knitro.solve_problem( + problem_fn=mps_file, solution_fn=sol_file, log_fn=log_file + ) + assert result.status.is_ok + + +@pytest.mark.skipif( + "knitro" not in set(solvers.available_solvers), reason="Knitro is not installed" +) +def test_knitro_solver_with_model_raises_error(model: Model) -> None: # noqa: F811 + """Test Knitro solver raises NotImplementedError for model-based solving.""" + knitro = solvers.Knitro() + with pytest.raises( + NotImplementedError, match="Direct API not implemented for Knitro" + ): + knitro.solve_problem(model=model) + + +@pytest.mark.skipif( + "knitro" not in set(solvers.available_solvers), reason="Knitro is not installed" +) +def 
test_knitro_solver_no_log(tmp_path: Path) -> None: + """Test Knitro solver without log file.""" + knitro = solvers.Knitro(outlev=0) + + mps_file = tmp_path / "problem.mps" + mps_file.write_text(free_mps_problem) + sol_file = tmp_path / "solution.sol" + + result = knitro.solve_problem(problem_fn=mps_file, solution_fn=sol_file) + + assert result.status.is_ok + + @pytest.mark.skipif( "gurobi" not in set(solvers.available_solvers), reason="Gurobi is not installed" ) diff --git a/test/test_sos_reformulation.py b/test/test_sos_reformulation.py new file mode 100644 index 00000000..f45ea706 --- /dev/null +++ b/test/test_sos_reformulation.py @@ -0,0 +1,818 @@ +"""Tests for SOS constraint reformulation.""" + +from __future__ import annotations + +import numpy as np +import pandas as pd +import pytest + +from linopy import Model, available_solvers +from linopy.constants import SOS_TYPE_ATTR +from linopy.sos_reformulation import ( + compute_big_m_values, + reformulate_sos1, + reformulate_sos2, + reformulate_sos_constraints, + undo_sos_reformulation, +) + + +class TestValidateBounds: + """Tests for bound validation in compute_big_m_values.""" + + def test_finite_bounds_pass(self) -> None: + """Finite non-negative bounds should pass validation.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + compute_big_m_values(x) # Should not raise + + def test_infinite_upper_bounds_raise(self) -> None: + """Infinite upper bounds should raise ValueError.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=np.inf, coords=[idx], name="x") + with pytest.raises(ValueError, match="infinite upper bounds"): + compute_big_m_values(x) + + def test_negative_lower_bounds_raise(self) -> None: + """Negative lower bounds should raise ValueError.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=-1, upper=1, coords=[idx], name="x") + with pytest.raises(ValueError, 
match="negative lower bounds"): + compute_big_m_values(x) + + def test_mixed_negative_lower_bounds_raise(self) -> None: + """Mixed finite/negative lower bounds should raise ValueError.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables( + lower=np.array([0, -1, 0]), + upper=np.array([1, 1, 1]), + coords=[idx], + name="x", + ) + with pytest.raises(ValueError, match="negative lower bounds"): + compute_big_m_values(x) + + +class TestComputeBigM: + """Tests for compute_big_m_values.""" + + def test_positive_bounds(self) -> None: + """Test Big-M computation with positive bounds.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=10, coords=[idx], name="x") + M = compute_big_m_values(x) + assert np.allclose(M.values, [10, 10, 10]) + + def test_varying_bounds(self) -> None: + """Test Big-M computation with varying upper bounds.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables( + lower=np.array([0, 0, 0]), + upper=np.array([1, 2, 3]), + coords=[idx], + name="x", + ) + M = compute_big_m_values(x) + assert np.allclose(M.values, [1, 2, 3]) + + def test_custom_big_m_scalar(self) -> None: + """Test Big-M uses tighter of custom value and bounds.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=100, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i", big_m=10) + M = compute_big_m_values(x) + # M = min(10, 100) = 10 (custom is tighter) + assert np.allclose(M.values, [10, 10, 10]) + + def test_custom_big_m_allows_infinite_bounds(self) -> None: + """Test that custom big_m allows variables with infinite bounds.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=np.inf, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i", big_m=10) + # Should not raise - custom big_m makes result finite + M = compute_big_m_values(x) + assert np.allclose(M.values, [10, 10, 10]) + 
+ +class TestSOS1Reformulation: + """Tests for SOS1 reformulation.""" + + def test_basic_sos1(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + reformulate_sos1(m, x, "_test_") + m.remove_sos_constraints(x) + + # Check auxiliary variables and constraints were added + assert "_test_x_y" in m.variables + assert "_test_x_upper" in m.constraints + assert "_test_x_card" in m.constraints + + # Binary variable should have same dimensions + y = m.variables["_test_x_y"] + assert y.dims == x.dims + assert y.sizes == x.sizes + + def test_sos1_multidimensional(self) -> None: + m = Model() + idx_i = pd.Index([0, 1, 2], name="i") + idx_j = pd.Index([0, 1], name="j") + x = m.add_variables(lower=0, upper=1, coords=[idx_i, idx_j], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + reformulate_sos1(m, x, "_test_") + m.remove_sos_constraints(x) + + # Binary variable should have same dimensions + y = m.variables["_test_x_y"] + assert set(y.dims) == {"i", "j"} + + # Cardinality constraint should have reduced dimensions (summed over i) + card_con = m.constraints["_test_x_card"] + assert "j" in card_con.dims + + +class TestSOS2Reformulation: + """Tests for SOS2 reformulation.""" + + def test_basic_sos2(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=2, sos_dim="i") + + reformulate_sos2(m, x, "_test_") + m.remove_sos_constraints(x) + + # Check auxiliary variables and constraints were added + assert "_test_x_z" in m.variables + assert "_test_x_upper_first" in m.constraints + assert "_test_x_upper_last" in m.constraints + assert "_test_x_card" in m.constraints + + # Segment indicators should have n-1 elements + z = m.variables["_test_x_z"] + assert z.sizes["i"] == 2 # n-1 = 3-1 = 2 + + def 
test_sos2_trivial_single_element(self) -> None: + m = Model() + idx = pd.Index([0], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=2, sos_dim="i") + + reformulate_sos2(m, x, "_test_") + + assert "_test_x_z" not in m.variables + + def test_sos2_two_elements(self) -> None: + m = Model() + idx = pd.Index([0, 1], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=2, sos_dim="i") + + reformulate_sos2(m, x, "_test_") + m.remove_sos_constraints(x) + + # Should have 1 segment indicator + z = m.variables["_test_x_z"] + assert z.sizes["i"] == 1 + + def test_sos2_with_middle_constraints(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2, 3], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=2, sos_dim="i") + + reformulate_sos2(m, x, "_test_") + m.remove_sos_constraints(x) + + assert "_test_x_upper_first" in m.constraints + assert "_test_x_upper_mid" in m.constraints + assert "_test_x_upper_last" in m.constraints + + def test_sos2_multidimensional(self) -> None: + m = Model() + idx_i = pd.Index([0, 1, 2], name="i") + idx_j = pd.Index([0, 1], name="j") + x = m.add_variables(lower=0, upper=1, coords=[idx_i, idx_j], name="x") + m.add_sos_constraints(x, sos_type=2, sos_dim="i") + + reformulate_sos2(m, x, "_test_") + m.remove_sos_constraints(x) + + # Segment indicator should have (n-1) elements in i dimension, same j dimension + z = m.variables["_test_x_z"] + assert set(z.dims) == {"i", "j"} + assert z.sizes["i"] == 2 # n-1 = 3-1 = 2 + assert z.sizes["j"] == 2 + + # Cardinality constraint should have j dimension preserved + card_con = m.constraints["_test_x_card"] + assert "j" in card_con.dims + + +class TestReformulateAllSOS: + """Tests for reformulate_all_sos.""" + + def test_reformulate_single_sos1(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, 
upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + result = reformulate_sos_constraints(m) + + assert result.reformulated == ["x"] + assert len(list(m.variables.sos)) == 0 + + def test_reformulate_multiple_sos(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + y = m.add_variables(lower=0, upper=2, coords=[idx], name="y") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + m.add_sos_constraints(y, sos_type=2, sos_dim="i") + + result = reformulate_sos_constraints(m) + + assert set(result.reformulated) == {"x", "y"} + assert len(list(m.variables.sos)) == 0 + + def test_reformulate_removes_sos_attrs_for_single_element(self) -> None: + m = Model() + idx = pd.Index([0], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + result = reformulate_sos_constraints(m) + + assert result.reformulated == ["x"] + assert len(list(m.variables.sos)) == 0 + assert len(result.added_variables) == 0 + assert len(result.added_constraints) == 0 + + def test_reformulate_removes_sos_attrs_for_zero_bounds(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=0, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + result = reformulate_sos_constraints(m) + + assert result.reformulated == ["x"] + assert len(list(m.variables.sos)) == 0 + assert len(result.added_variables) == 0 + assert len(result.added_constraints) == 0 + + def test_reformulate_raises_on_infinite_bounds(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=np.inf, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + with pytest.raises(ValueError, match="infinite"): + reformulate_sos_constraints(m) + + def test_reformulate_raises_on_negative_lower_bounds(self) -> None: + m = Model() + 
idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=-1, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + with pytest.raises(ValueError, match="negative lower bounds"): + reformulate_sos_constraints(m) + + +class TestModelReformulateSOS: + """Tests for Model.reformulate_sos_constraints method.""" + + def test_reformulate_inplace(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + result = m.reformulate_sos_constraints() + + assert result.reformulated == ["x"] + assert len(list(m.variables.sos)) == 0 + assert "_sos_reform_x_y" in m.variables + + +@pytest.mark.skipif("highs" not in available_solvers, reason="HiGHS not installed") +class TestSolveWithReformulation: + """Tests for solving with SOS reformulation.""" + + def test_sos1_maximize_with_highs(self) -> None: + """Test SOS1 maximize problem with HiGHS using reformulation.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + m.add_objective(x * np.array([1, 2, 3]), sense="max") + + m.solve(solver_name="highs", reformulate_sos=True) + + # Should maximize by choosing x[2] = 1 + assert np.isclose(x.solution.values[2], 1, atol=1e-5) + assert np.isclose(x.solution.values[0], 0, atol=1e-5) + assert np.isclose(x.solution.values[1], 0, atol=1e-5) + assert m.objective.value is not None + assert np.isclose(m.objective.value, 3, atol=1e-5) + + def test_sos1_minimize_with_highs(self) -> None: + """Test SOS1 minimize problem with HiGHS using reformulation.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + m.add_objective(x * np.array([3, 2, 1]), sense="min") + + m.solve(solver_name="highs", 
reformulate_sos=True) + + # Should minimize to 0 by setting all x = 0 + assert m.objective.value is not None + assert np.isclose(m.objective.value, 0, atol=1e-5) + + def test_sos2_maximize_with_highs(self) -> None: + """Test SOS2 maximize problem with HiGHS using reformulation.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=2, sos_dim="i") + m.add_objective(x * np.array([1, 2, 3]), sense="max") + + m.solve(solver_name="highs", reformulate_sos=True) + + # SOS2 allows two adjacent non-zeros, so x[1] and x[2] can both be 1 + # Maximum is 2 + 3 = 5 + assert m.objective.value is not None + assert np.isclose(m.objective.value, 5, atol=1e-5) + # Check that at most two adjacent variables are non-zero + nonzero_count = (np.abs(x.solution.values) > 1e-5).sum() + assert nonzero_count <= 2 + + def test_sos2_different_coefficients(self) -> None: + """Test SOS2 with different coefficients.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=2, sos_dim="i") + m.add_objective(x * np.array([2, 1, 3]), sense="max") + + m.solve(solver_name="highs", reformulate_sos=True) + + # Best is x[1]=1 and x[2]=1 giving 1+3=4 + # or x[0]=1 and x[1]=1 giving 2+1=3 + assert m.objective.value is not None + assert np.isclose(m.objective.value, 4, atol=1e-5) + + def test_reformulate_sos_false_raises_error(self) -> None: + """Test that HiGHS without reformulate_sos raises error.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + m.add_objective(x.sum(), sense="max") + + with pytest.raises(ValueError, match="does not support SOS"): + m.solve(solver_name="highs", reformulate_sos=False) + + def test_multidimensional_sos1_with_highs(self) -> None: + """Test multi-dimensional 
SOS1 with HiGHS.""" + m = Model() + idx_i = pd.Index([0, 1, 2], name="i") + idx_j = pd.Index([0, 1], name="j") + x = m.add_variables(lower=0, upper=1, coords=[idx_i, idx_j], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + m.add_objective(x.sum(), sense="max") + + m.solve(solver_name="highs", reformulate_sos=True) + + # For each j, at most one x[i, j] can be non-zero + # Maximum is achieved by one non-zero per j column: 2 total + assert m.objective.value is not None + assert np.isclose(m.objective.value, 2, atol=1e-5) + + # Check SOS1 is satisfied for each j + for j in idx_j: + nonzero_count = (np.abs(x.solution.sel(j=j).values) > 1e-5).sum() + assert nonzero_count <= 1 + + def test_multidimensional_sos2_with_highs(self) -> None: + """Test multi-dimensional SOS2 with HiGHS.""" + m = Model() + idx_i = pd.Index([0, 1, 2], name="i") + idx_j = pd.Index([0, 1], name="j") + x = m.add_variables(lower=0, upper=1, coords=[idx_i, idx_j], name="x") + m.add_sos_constraints(x, sos_type=2, sos_dim="i") + m.add_objective(x.sum(), sense="max") + + m.solve(solver_name="highs", reformulate_sos=True) + + # For each j, at most two adjacent x[i, j] can be non-zero + # Maximum is achieved by two adjacent non-zeros per j column: 4 total + assert m.objective.value is not None + assert np.isclose(m.objective.value, 4, atol=1e-5) + + # Check SOS2 is satisfied for each j + for j in idx_j: + sol_j = x.solution.sel(j=j).values + nonzero_indices = np.where(np.abs(sol_j) > 1e-5)[0] + # At most 2 non-zeros + assert len(nonzero_indices) <= 2 + # If 2 non-zeros, they must be adjacent + if len(nonzero_indices) == 2: + assert abs(nonzero_indices[1] - nonzero_indices[0]) == 1 + + +@pytest.mark.skipif("gurobi" not in available_solvers, reason="Gurobi not installed") +class TestEquivalenceWithGurobi: + """Tests comparing reformulated solutions with native Gurobi SOS.""" + + def test_sos1_equivalence(self) -> None: + """Test that reformulated SOS1 gives same result as native Gurobi.""" + 
gurobipy = pytest.importorskip("gurobipy") + + # Native Gurobi solution + m1 = Model() + idx = pd.Index([0, 1, 2], name="i") + x1 = m1.add_variables(lower=0, upper=1, coords=[idx], name="x") + m1.add_sos_constraints(x1, sos_type=1, sos_dim="i") + m1.add_objective(x1 * np.array([1, 2, 3]), sense="max") + + try: + m1.solve(solver_name="gurobi") + except gurobipy.GurobiError as exc: + pytest.skip(f"Gurobi environment unavailable: {exc}") + + # Reformulated solution with HiGHS + m2 = Model() + x2 = m2.add_variables(lower=0, upper=1, coords=[idx], name="x") + m2.add_sos_constraints(x2, sos_type=1, sos_dim="i") + m2.add_objective(x2 * np.array([1, 2, 3]), sense="max") + + if "highs" in available_solvers: + m2.solve(solver_name="highs", reformulate_sos=True) + assert m1.objective.value is not None + assert m2.objective.value is not None + assert np.isclose(m1.objective.value, m2.objective.value, atol=1e-5) + + def test_sos2_equivalence(self) -> None: + """Test that reformulated SOS2 gives same result as native Gurobi.""" + gurobipy = pytest.importorskip("gurobipy") + + # Native Gurobi solution + m1 = Model() + idx = pd.Index([0, 1, 2], name="i") + x1 = m1.add_variables(lower=0, upper=1, coords=[idx], name="x") + m1.add_sos_constraints(x1, sos_type=2, sos_dim="i") + m1.add_objective(x1 * np.array([1, 2, 3]), sense="max") + + try: + m1.solve(solver_name="gurobi") + except gurobipy.GurobiError as exc: + pytest.skip(f"Gurobi environment unavailable: {exc}") + + # Reformulated solution with HiGHS + m2 = Model() + x2 = m2.add_variables(lower=0, upper=1, coords=[idx], name="x") + m2.add_sos_constraints(x2, sos_type=2, sos_dim="i") + m2.add_objective(x2 * np.array([1, 2, 3]), sense="max") + + if "highs" in available_solvers: + m2.solve(solver_name="highs", reformulate_sos=True) + assert m1.objective.value is not None + assert m2.objective.value is not None + assert np.isclose(m1.objective.value, m2.objective.value, atol=1e-5) + + +class TestEdgeCases: + """Tests for edge 
cases.""" + + def test_preserves_non_sos_variables(self) -> None: + """Test that non-SOS variables are preserved.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_variables(lower=0, upper=2, coords=[idx], name="y") # No SOS + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + reformulate_sos_constraints(m) + + # y should be unchanged + assert "y" in m.variables + assert SOS_TYPE_ATTR not in m.variables["y"].attrs + + def test_custom_prefix(self) -> None: + """Test custom prefix for reformulation.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + reformulate_sos_constraints(m, prefix="_custom_") + + assert "_custom_x_y" in m.variables + assert "_custom_x_upper" in m.constraints + assert "_custom_x_card" in m.constraints + + def test_constraints_with_sos_variables(self) -> None: + """Test that existing constraints with SOS variables work after reformulation.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + y = m.add_variables(lower=0, upper=10, name="y") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + # Add constraint involving SOS variable + m.add_constraints(x.sum() <= y, name="linking") + + # Reformulate + reformulate_sos_constraints(m) + + # Original constraint should still exist + assert "linking" in m.constraints + + def test_float_coordinates(self) -> None: + """Test SOS with float coordinates (common for piecewise linear).""" + m = Model() + breakpoints = pd.Index([0.0, 0.5, 1.0], name="bp") + lambdas = m.add_variables(lower=0, upper=1, coords=[breakpoints], name="lambda") + m.add_sos_constraints(lambdas, sos_type=2, sos_dim="bp") + + reformulate_sos_constraints(m) + + # Should work with float coordinates + assert "_sos_reform_lambda_z" in m.variables + z = 
m.variables["_sos_reform_lambda_z"] + # Segment indicators have n-1 = 2 elements + assert z.sizes["bp"] == 2 + + def test_custom_big_m_removed_on_remove_sos(self) -> None: + """Test that custom big_m attribute is removed with SOS constraint.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=100, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i", big_m=10) + + assert "big_m_upper" in x.attrs + + m.remove_sos_constraints(x) + + assert "big_m_upper" not in x.attrs + + +@pytest.mark.skipif("highs" not in available_solvers, reason="HiGHS not installed") +class TestCustomBigM: + """Tests for custom Big-M functionality.""" + + def test_solve_with_custom_big_m(self) -> None: + """Test solving with custom big_m value.""" + m = Model() + idx = pd.Index([0, 1, 2], name="i") + # Large bounds but tight effective constraint + x = m.add_variables(lower=0, upper=1000, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i", big_m=1) + m.add_objective(x * np.array([1, 2, 3]), sense="max") + + m.solve(solver_name="highs", reformulate_sos=True) + + # With big_m=1, maximum should be 3 (x[2]=1) + assert m.objective.value is not None + assert np.isclose(m.objective.value, 3, atol=1e-5) + + def test_solve_with_infinite_bounds_and_custom_big_m(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=np.inf, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i", big_m=5) + m.add_objective(x * np.array([1, 2, 3]), sense="max") + + m.solve(solver_name="highs", reformulate_sos=True) + + assert m.objective.value is not None + assert np.isclose(m.objective.value, 15, atol=1e-5) + + def test_solve_does_not_mutate_model(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + m.add_objective(x * np.array([1, 2, 3]), 
sense="max") + + vars_before = set(m.variables) + cons_before = set(m.constraints) + sos_before = list(m.variables.sos) + + m.solve(solver_name="highs", reformulate_sos=True) + + assert set(m.variables) == vars_before + assert set(m.constraints) == cons_before + assert list(m.variables.sos) == sos_before + + def test_solve_twice_with_reformulate_sos(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + m.add_objective(x * np.array([1, 2, 3]), sense="max") + + m.solve(solver_name="highs", reformulate_sos=True) + obj1 = m.objective.value + + m.solve(solver_name="highs", reformulate_sos=True) + obj2 = m.objective.value + + assert obj1 is not None and obj2 is not None + assert np.isclose(obj1, obj2, atol=1e-5) + + +@pytest.mark.skipif("highs" not in available_solvers, reason="HiGHS not installed") +class TestNoSosConstraints: + def test_reformulate_sos_true_with_no_sos(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_objective(x.sum(), sense="max") + + m.solve(solver_name="highs", reformulate_sos=True) + + assert m.objective.value is not None + assert np.isclose(m.objective.value, 3, atol=1e-5) + + +class TestPartialFailure: + def test_partial_failure_rolls_back(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + y = m.add_variables(lower=-1, upper=1, coords=[idx], name="y") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + m.add_sos_constraints(y, sos_type=1, sos_dim="i") + + vars_before = set(m.variables) + cons_before = set(m.constraints) + sos_before = list(m.variables.sos) + + with pytest.raises(ValueError, match="negative lower bounds"): + reformulate_sos_constraints(m) + + assert set(m.variables) == vars_before + assert set(m.constraints) == cons_before + assert 
list(m.variables.sos) == sos_before + + +class TestMixedBounds: + def test_mixed_finite_infinite_with_big_m(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables( + lower=np.array([0, 0, 0]), + upper=np.array([5, np.inf, 10]), + coords=[idx], + name="x", + ) + m.add_sos_constraints(x, sos_type=1, sos_dim="i", big_m=8) + M = compute_big_m_values(x) + assert np.allclose(M.values, [5, 8, 8]) + + def test_mixed_finite_infinite_without_big_m_raises(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables( + lower=np.array([0, 0, 0]), + upper=np.array([5, np.inf, 10]), + coords=[idx], + name="x", + ) + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + with pytest.raises(ValueError, match="infinite upper bounds"): + compute_big_m_values(x) + + +class TestBigMValidation: + def test_big_m_zero_raises(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + with pytest.raises(ValueError, match="big_m must be positive"): + m.add_sos_constraints(x, sos_type=1, sos_dim="i", big_m=0) + + def test_big_m_negative_raises(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + with pytest.raises(ValueError, match="big_m must be positive"): + m.add_sos_constraints(x, sos_type=1, sos_dim="i", big_m=-5) + + +class TestUndoReformulation: + def test_undo_restores_sos_attrs(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + result = reformulate_sos_constraints(m) + + assert len(list(m.variables.sos)) == 0 + assert "_sos_reform_x_y" in m.variables + + undo_sos_reformulation(m, result) + + assert list(m.variables.sos) == ["x"] + assert "_sos_reform_x_y" not in m.variables + assert "_sos_reform_x_upper" not in m.constraints + assert 
"_sos_reform_x_card" not in m.constraints + + def test_double_reformulate_is_noop(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + m.reformulate_sos_constraints() + + result2 = m.reformulate_sos_constraints() + assert result2.reformulated == [] + + def test_undo_restores_skipped_single_element(self) -> None: + m = Model() + idx = pd.Index([0], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + result = reformulate_sos_constraints(m) + + assert len(list(m.variables.sos)) == 0 + + undo_sos_reformulation(m, result) + + assert list(m.variables.sos) == ["x"] + + def test_undo_restores_skipped_zero_bounds(self) -> None: + m = Model() + idx = pd.Index([0, 1, 2], name="i") + x = m.add_variables(lower=0, upper=0, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + + result = reformulate_sos_constraints(m) + + assert len(list(m.variables.sos)) == 0 + + undo_sos_reformulation(m, result) + + assert list(m.variables.sos) == ["x"] + + +@pytest.mark.skipif("highs" not in available_solvers, reason="HiGHS not installed") +class TestUnsortedCoords: + def test_sos2_unsorted_coords_matches_sorted(self) -> None: + coeffs = np.array([1, 2, 3]) + + m_sorted = Model() + idx_sorted = pd.Index([1, 2, 3], name="i") + x_sorted = m_sorted.add_variables( + lower=0, upper=1, coords=[idx_sorted], name="x" + ) + m_sorted.add_sos_constraints(x_sorted, sos_type=2, sos_dim="i") + m_sorted.add_objective(x_sorted * coeffs, sense="max") + m_sorted.solve(solver_name="highs", reformulate_sos=True) + + m_unsorted = Model() + idx_unsorted = pd.Index([3, 1, 2], name="i") + x_unsorted = m_unsorted.add_variables( + lower=0, upper=1, coords=[idx_unsorted], name="x" + ) + m_unsorted.add_sos_constraints(x_unsorted, sos_type=2, sos_dim="i") + 
m_unsorted.add_objective(x_unsorted * coeffs, sense="max") + m_unsorted.solve(solver_name="highs", reformulate_sos=True) + + assert m_sorted.objective.value is not None + assert m_unsorted.objective.value is not None + assert np.isclose( + m_sorted.objective.value, m_unsorted.objective.value, atol=1e-5 + ) + + def test_sos1_unsorted_coords(self) -> None: + m = Model() + idx = pd.Index([3, 1, 2], name="i") + x = m.add_variables(lower=0, upper=1, coords=[idx], name="x") + m.add_sos_constraints(x, sos_type=1, sos_dim="i") + m.add_objective(x * np.array([1, 2, 3]), sense="max") + m.solve(solver_name="highs", reformulate_sos=True) + + assert m.objective.value is not None + assert np.isclose(m.objective.value, 3, atol=1e-5) diff --git a/test/test_variables.py b/test/test_variables.py index 3984b091..37de6aff 100644 --- a/test/test_variables.py +++ b/test/test_variables.py @@ -107,6 +107,39 @@ def test_variables_nvars(m: Model) -> None: assert m.variables.nvars == 19 +def test_variables_mask_broadcast() -> None: + m = Model() + + lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) + upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + + mask = pd.Series([True] * 5 + [False] * 5) + x = m.add_variables(lower, upper, name="x", mask=mask) + assert (x.labels[0:5, :] != -1).all() + assert (x.labels[5:10, :] == -1).all() + + mask2 = xr.DataArray([True] * 5 + [False] * 5, dims=["dim_1"]) + y = m.add_variables(lower, upper, name="y", mask=mask2) + assert (y.labels[:, 0:5] != -1).all() + assert (y.labels[:, 5:10] == -1).all() + + mask3 = xr.DataArray( + [True, True, False, False, False], + dims=["dim_0"], + coords={"dim_0": range(5)}, + ) + with pytest.warns(FutureWarning, match="Missing values will be filled"): + z = m.add_variables(lower, upper, name="z", mask=mask3) + assert (z.labels[0:2, :] != -1).all() + assert (z.labels[2:5, :] == -1).all() + assert (z.labels[5:10, :] == -1).all() + + # Mask with extra dimension not in data should 
raise + mask4 = xr.DataArray([True, False], dims=["extra_dim"]) + with pytest.raises(AssertionError, match="not a subset"): + m.add_variables(lower, upper, name="w", mask=mask4) + + def test_variables_get_name_by_label(m: Model) -> None: assert m.variables.get_name_by_label(4) == "x" assert m.variables.get_name_by_label(12) == "y"