Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
db487c7
Fix indexing of Power_Demand in kmedoids_clustering
FelixCAAuer Nov 20, 2025
233aa03
Fix import for Utilities
FelixCAAuer Nov 20, 2025
8b0f7e6
Remove return-type from apply_kmedoids to avoid circular imports
FelixCAAuer Nov 24, 2025
295fdd4
Fix inplace-adjustment of CaseStudy with kmedoids
FelixCAAuer Nov 24, 2025
dc86e65
Improve copy and implement equal_to for CaseStudy
FelixCAAuer Dec 23, 2025
2300065
Split applying k-medoids algorithm and update calls accordingly
FelixCAAuer Dec 23, 2025
b6c55cb
Add suffix.Suffix case to SQLWriter when exporting Pyomo-Model
FelixCAAuer Dec 23, 2025
f13cf44
Fix merge_single_node_buses function
FelixCAAuer Jan 22, 2026
8c98793
Fix order of indices for dPower_Demand
FelixCAAuer Jan 22, 2026
64c39cd
Add safety-check for pEnableInvest in Power_Network
FelixCAAuer Jan 28, 2026
8d944bb
Add printer for linear expressions
FelixCAAuer Jan 28, 2026
e2ec791
Fix aggregation regarding indices i,j,c
FelixCAAuer Jan 28, 2026
efa3848
Add escapes to printer, fix import issue for annotations
FelixCAAuer Jan 29, 2026
fcf6035
Add function to add solver statistics to .sqlite
FelixCAAuer Jan 29, 2026
6082ecb
Implement function to add run parameters to .sqlite files
FelixCAAuer Feb 2, 2026
dfcb37d
Add function to add objective decomposition to sqlite
FelixCAAuer Feb 3, 2026
358f4e6
Add dual information to .sqlite files
FelixCAAuer Feb 3, 2026
2131ab3
Fix warning message for dual-suffix
FelixCAAuer Feb 3, 2026
d6d8a9b
Add re-weighting of Weights_K after filtering timesteps as a default
FelixCAAuer Feb 5, 2026
3b5774b
Add joined zone-string to merged buses
FelixCAAuer Feb 10, 2026
4465ff6
Remove xlrd from environment.yml
FelixCAAuer Feb 25, 2026
afb9a90
Fix circular import in ExcelWriter
FelixCAAuer Feb 25, 2026
4d772b2
Fix small bugs in CaseStudy.py and other files
FelixCAAuer Feb 26, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
243 changes: 181 additions & 62 deletions CaseStudy.py

Large diffs are not rendered by default.

5 changes: 5 additions & 0 deletions ExcelReader.py
Original file line number Diff line number Diff line change
Expand Up @@ -332,6 +332,11 @@ def get_Power_Network(excel_file_path: str, keep_excluded_entries: bool = False,
"""
dPower_Network = __read_non_pivoted_file(excel_file_path, "v0.1.2", ["i", "j", "c"], True, keep_excluded_entries, fail_on_wrong_version)

# Check that all values in column pEnableInvest are either 0 or 1
if not dPower_Network['pEnableInvest'].isin([0, 1]).all():
invalid_values = dPower_Network.loc[~dPower_Network['pEnableInvest'].isin([0, 1]), 'pEnableInvest']
raise ValueError(f"dPower_Network: Found invalid values in 'pEnableInvest' column. Only 0 and 1 are allowed, but found: {invalid_values}")

return dPower_Network


Expand Down
9 changes: 8 additions & 1 deletion ExcelWriter.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
from __future__ import annotations

import os
import time
import xml.etree.ElementTree as ET
from copy import copy, deepcopy
from pathlib import Path
from typing import TYPE_CHECKING

import numpy as np
import openpyxl
Expand All @@ -12,7 +15,9 @@

import ExcelReader
import TableDefinition
from CaseStudy import CaseStudy

if TYPE_CHECKING:
from CaseStudy import CaseStudy
from TableDefinition import CellStyle, Alignment, Font, Color, Text, Column, NumberFormat, TableDefinition
from printer import Printer

Expand Down Expand Up @@ -261,6 +266,8 @@ def write_caseStudy(self, cs: CaseStudy, folder_path: str | Path) -> None:
self.write_Power_VRES(cs.dPower_VRES, folder_path)
if hasattr(cs, "dPower_VRESProfiles"):
self.write_Power_VRESProfiles(cs.dPower_VRESProfiles, folder_path)
if hasattr(cs, "dPower_ImportExport") and cs.dPower_ImportExport is not None:
self.write_Power_ImportExport(cs.dPower_ImportExport, folder_path)
self.write_Power_WeightsK(cs.dPower_WeightsK, folder_path)
self.write_Power_WeightsRP(cs.dPower_WeightsRP, folder_path)

Expand Down
239 changes: 239 additions & 0 deletions SQLiteWriter.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@
def model_to_sqlite(model: pyo.base.Model, filename: str) -> None:
"""
Save the model to a SQLite database.
Automatically includes objective decomposition and dual values.

:param model: Pyomo model to save
:param filename: Path to the SQLite database file
:return: None
Expand Down Expand Up @@ -40,10 +42,247 @@ def model_to_sqlite(model: pyo.base.Model, filename: str) -> None:
df = pd.DataFrame([pyo.value(o)], columns=['values'])
case pyomo.core.base.constraint.ConstraintList | pyomo.core.base.constraint.IndexedConstraint | pyomo.core.base.expression.IndexedExpression: # Those will not be saved on purpose
continue
case pyomo.core.base.suffix.Suffix:
if str(o) in ["_relaxed_integer_vars", "dual"]:
continue # Not saved on purpose
else:
printer.warning(f"Pyomo-Type {type(o)} not implemented, {o.name} will not be saved to SQLite")
continue
case _:
printer.warning(f"Pyomo-Type {type(o)} not implemented, {o.name} will not be saved to SQLite")
continue
df.to_sql(o.name, cnx, if_exists='replace')
cnx.commit()
cnx.close()

# Automatically add objective decomposition and dual values
add_objective_decomposition_to_sqlite(filename, model)
add_dual_values_to_sqlite(filename, model)
pass


def add_solver_statistics_to_sqlite(filename: str, results, work_units=None) -> None:
    """
    Add solver statistics (like Gurobi work-units) to an existing SQLite database.

    Writes a single-row table 'solver_statistics' containing whichever of the
    following could be extracted: work units, solver status, termination
    condition, solver time, and problem-size statistics.

    :param filename: Path to the SQLite database file
    :param results: Pyomo solver results object
    :param work_units: Optional work units value (from Gurobi solver)
    :return: None
    """
    cnx = sqlite3.connect(filename)

    # Collected statistics; only keys that could actually be extracted are stored.
    stats = {}

    try:
        # Add work units if provided by the caller (Gurobi-specific metric)
        if work_units is not None:
            stats['work_units'] = float(work_units)

        # Get basic solver info from solver[0]
        if hasattr(results, 'solver') and len(results.solver) > 0:
            solver_info = results.solver[0]

            # Status and termination
            if hasattr(solver_info, 'status'):
                stats['solver_status'] = str(solver_info.status)
            if hasattr(solver_info, 'termination_condition'):
                stats['termination_condition'] = str(solver_info.termination_condition)
            if hasattr(solver_info, 'time'):
                try:
                    time_val = solver_info.time
                    # Pyomo reports an UndefinedData sentinel when no time is
                    # available; compare by type name instead of the fragile
                    # str(type(...)) representation.
                    if time_val is not None and type(time_val).__name__ != 'UndefinedData':
                        stats['solver_time'] = float(time_val)
                except Exception:
                    # Best effort: an unparsable time value is simply omitted.
                    pass

        # Get problem statistics (bounds and model-size counters)
        if hasattr(results, 'problem'):
            problem = results.problem
            for attr in ['lower_bound', 'upper_bound', 'number_of_constraints',
                         'number_of_variables', 'number_of_nonzeros']:
                if hasattr(problem, attr):
                    value = getattr(problem, attr)
                    if value is not None:
                        stats[attr] = float(value) if isinstance(value, (int, float)) else str(value)

        # Persist as a single-row table (replacing any previous run's statistics)
        if stats:
            df = pd.DataFrame([stats])
            df.to_sql('solver_statistics', cnx, if_exists='replace', index=False)
            cnx.commit()
            work_units_str = f"{stats['work_units']:.2f}" if 'work_units' in stats else 'N/A'
            printer.information(f"Added solver statistics to SQLite database (work_units: {work_units_str})")
        else:
            printer.warning("No solver statistics found in results object")

    except Exception as e:
        # Best-effort export: report the failure but never crash the caller.
        printer.error(f"Failed to add solver statistics: {e}")
        import traceback
        traceback.print_exc()
    finally:
        cnx.close()


def add_run_parameters_to_sqlite(filename: str, **parameters) -> None:
    """
    Add run parameters to an existing SQLite database.
    Creates a table 'run_parameters' with parameter names and values.

    Numeric values are stored as floats, everything else (including None and
    booleans) as strings, so the table round-trips human-readably.

    :param filename: Path to the SQLite database file
    :param parameters: Keyword arguments containing parameter names and values
    :return: None

    Example:
        add_run_parameters_to_sqlite('model.sqlite',
                                     zoi='R1',
                                     dc_buffer=2,
                                     tp_buffer=1,
                                     scale_demand=1.3,
                                     scale_pmax=1.0)
    """
    cnx = sqlite3.connect(filename)

    try:
        # Normalize parameter values for storage
        params = {}
        for key, value in parameters.items():
            if value is None:
                # Convert None to string 'None' for storage
                params[key] = 'None'
            elif isinstance(value, bool):
                # bool is a subclass of int: check it first so flags are stored
                # as 'True'/'False' instead of being silently coerced to 1.0/0.0
                params[key] = str(value)
            elif isinstance(value, (int, float)):
                params[key] = float(value)
            else:
                params[key] = str(value)

        if params:
            # Single-row table; a re-run replaces the previous parameters
            df = pd.DataFrame([params])
            df.to_sql('run_parameters', cnx, if_exists='replace', index=False)
            cnx.commit()
            printer.information(f"Added run parameters to SQLite database: {', '.join([f'{k}={v}' for k, v in params.items()])}")
        else:
            printer.warning("No run parameters provided")

    except Exception as e:
        # Best-effort export: report the failure but never crash the caller.
        printer.error(f"Failed to add run parameters: {e}")
        import traceback
        traceback.print_exc()
    finally:
        cnx.close()


def add_objective_decomposition_to_sqlite(filename: str, model: pyo.ConcreteModel) -> None:
    """
    Add objective function decomposition to SQLite database.
    This enables recalculation of ZOI objectives without the full model.

    The objective is decomposed into:
    - objective_constant: Single row with the constant term
    - objective_terms: Variable names, indices, and their coefficients

    :param filename: Path to the SQLite database file
    :param model: Pyomo model with objective
    :return: None
    """
    from pyomo.repn import generate_standard_repn

    connection = sqlite3.connect(filename)

    try:
        # Obtain the linear standard representation of the objective expression
        linear_repn = generate_standard_repn(model.objective.expr, quadratic=False)

        # Table 1: the constant term (falsy constant -> stored as 0.0)
        constant_value = linear_repn.constant if linear_repn.constant else 0.0
        pd.DataFrame([{'constant': constant_value}]).to_sql(
            'objective_constant', connection, if_exists='replace', index=False)

        # Table 2: one row per linear term (variable name, index, coefficient)
        names = [v.parent_component().name for v in linear_repn.linear_vars]
        indices = [str(v.index()) for v in linear_repn.linear_vars]
        terms_frame = pd.DataFrame({
            'var_name': names,
            'var_index': indices,
            'coefficient': list(linear_repn.linear_coefs),
        })
        terms_frame.to_sql('objective_terms', connection, if_exists='replace', index=False)

        connection.commit()
        printer.information(f"Added objective decomposition to SQLite ({len(indices)} terms)")

    except Exception as e:
        # Best-effort export: report the failure but never crash the caller.
        printer.error(f"Failed to add objective decomposition: {e}")
        import traceback
        traceback.print_exc()
    finally:
        connection.close()


def add_dual_values_to_sqlite(filename: str, model: pyo.ConcreteModel) -> None:
    """
    Add dual values (shadow prices) from model constraints to SQLite database.

    Dual values are stored in tables named 'dual_<constraint_name>' with:
    - Index columns for the constraint
    - 'dual_value' column containing the dual/shadow price

    :param filename: Path to the SQLite database file
    :param model: Solved Pyomo model with dual suffix
    :return: None
    """
    connection = sqlite3.connect(filename)

    try:
        # Without a dual suffix there is nothing to export
        if not hasattr(model, 'dual'):
            printer.warning("Model does not have dual suffix - no dual values to save")
            return

        dual_count = 0
        constraint_count = 0

        # One table per active constraint that has at least one dual value
        for con in model.component_objects(pyo.Constraint, active=True):
            rows = []

            for idx in con:
                try:
                    value = model.dual[con[idx]]
                except (KeyError, AttributeError):
                    # No dual available for this particular constraint member
                    continue
                if value is None:
                    continue

                # Flatten the constraint index into string-named columns
                if isinstance(idx, tuple):
                    # Multi-indexed constraint: one column per index position
                    row = {str(pos): component for pos, component in enumerate(idx)}
                else:
                    # Single-indexed or scalar constraint
                    row = {'0': idx}
                row['dual_value'] = float(value)
                rows.append(row)
                dual_count += 1

            constraint_count += 1

            if rows:
                pd.DataFrame(rows).to_sql(f'dual_{con.name}', connection,
                                          if_exists='replace', index=False)

        connection.commit()

        if dual_count > 0:
            printer.information(f"Added dual values to SQLite ({dual_count} duals from {constraint_count} constraints)")
        else:
            printer.warning(f"No dual values found in model (checked {constraint_count} constraints)")

    except Exception as e:
        # Best-effort export: report the failure but never crash the caller.
        printer.error(f"Failed to add dual values: {e}")
        import traceback
        traceback.print_exc()
    finally:
        connection.close()
Loading