Skip to content

Commit 4d772b2

Browse files
committed
Fix small bugs in CaseStudy.py and other files
Fix some output strings. Add printer to Utilities; add solver option for kmedoids.
1 parent afb9a90 commit 4d772b2

6 files changed

Lines changed: 55 additions & 40 deletions

File tree

CaseStudy.py

Lines changed: 7 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -254,16 +254,6 @@ def __init__(self,
254254

255255
def copy(self):
256256
new_self = copy.deepcopy(self)
257-
258-
# Copy each dataframe individually to ensure no shared references
259-
for df in (new_self.rpk_dependent_dataframes + new_self.rp_only_dependent_dataframes + new_self.k_only_dependent_dataframes + new_self.non_time_dependent_dataframes + new_self.non_dependent_dataframes):
260-
if hasattr(new_self, df) and getattr(new_self, df) is not None:
261-
original = getattr(new_self, df)
262-
if type(original) is pd.DataFrame:
263-
setattr(new_self, df, original.copy(deep=True))
264-
else:
265-
setattr(new_self, df, copy.deepcopy(original))
266-
267257
return new_self
268258

269259
def equal_to(self, cs: typing.Self) -> bool:
@@ -612,15 +602,15 @@ def merge_single_node_buses(self, inplace: bool = True) -> typing.Optional[typin
612602
cs.dPower_Network = cs.dPower_Network.groupby(['i', 'j', 'c']).agg(aggregation_methods_for_columns)
613603

614604
### Adapt dPower_ThermalGen
615-
if hasattr(self, "dPower_ThermalGen"):
605+
if hasattr(cs, "dPower_ThermalGen"):
616606
cs.dPower_ThermalGen.loc[cs.dPower_ThermalGen['i'].isin(connected_buses), 'i'] = new_bus_name
617607

618608
# Adapt dPower_VRES
619-
if hasattr(self, "dPower_VRES"):
609+
if hasattr(cs, "dPower_VRES"):
620610
cs.dPower_VRES.loc[cs.dPower_VRES['i'].isin(connected_buses), 'i'] = new_bus_name
621611

622612
# Adapt dPower_Storage
623-
if hasattr(self, "dPower_Storage"):
613+
if hasattr(cs, "dPower_Storage"):
624614
cs.dPower_Storage.loc[cs.dPower_Storage['i'].isin(connected_buses), 'i'] = new_bus_name
625615

626616
# Adapt dPower_Demand
@@ -635,6 +625,8 @@ def merge_single_node_buses(self, inplace: bool = True) -> typing.Optional[typin
635625
}
636626
cs.dPower_Demand = cs.dPower_Demand.groupby(['rp', 'k', 'i']).agg(aggregation_methods_power_demand)
637627

628+
return cs if not inplace else None
629+
638630
# Create transition matrix from Hindex
639631
def get_rpTransitionMatrices(self, clip_method: str = "none", clip_value: float = 0) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
640632
rps = sorted(self.dPower_Hindex.index.get_level_values('rp').unique().tolist())
@@ -667,7 +659,7 @@ def get_rpTransitionMatrices(self, clip_method: str = "none", clip_value: float
667659
raise ValueError(f"For 'relative_to_highest', clip_value must be between 0 and 1, not {clip_value}.")
668660
for rp in rps:
669661
threshold = rpTransitionMatrixAbsolute.loc[rp].max() * clip_value
670-
rpTransitionMatrixAbsolute.loc[rp][rpTransitionMatrixAbsolute.loc[rp] < threshold] = 0
662+
rpTransitionMatrixAbsolute.loc[rp, rpTransitionMatrixAbsolute.loc[rp] < threshold] = 0
671663
case _:
672664
raise ValueError(f"clip_method must be either 'none', 'absolute_count' or 'relative_to_highest', not {clip_method}.")
673665

@@ -901,7 +893,7 @@ def apply_kmedoids_aggregation(self, number_rps: int, rp_length: int = 24,
901893
"""
902894

903895
cs = self if inplace else self.copy()
904-
Utilities.apply_kmedoids_aggregation(cs, number_rps, rp_length, cluster_strategy, capacity_normalization, sum_production, inplace=True)
896+
Utilities.apply_kmedoids_aggregation(cs, number_rps, rp_length, cluster_strategy, capacity_normalization, sum_production, inplace=True, verbose=verbose)
905897
if inplace:
906898
return None
907899
else:

ExcelReader.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -335,7 +335,7 @@ def get_Power_Network(excel_file_path: str, keep_excluded_entries: bool = False,
335335
# Check that all values in column pEnableInvest are either 0 or 1
336336
if not dPower_Network['pEnableInvest'].isin([0, 1]).all():
337337
invalid_values = dPower_Network.loc[~dPower_Network['pEnableInvest'].isin([0, 1]), 'pEnableInvest']
338-
raise ValueError(f"dPower_Network: Found invalid values in 'EnableInvest' column. Only 0 and 1 are allowed, but found: {invalid_values}")
338+
raise ValueError(f"dPower_Network: Found invalid values in 'pEnableInvest' column. Only 0 and 1 are allowed, but found: {invalid_values}")
339339

340340
return dPower_Network
341341

ExcelWriter.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -266,6 +266,8 @@ def write_caseStudy(self, cs: CaseStudy, folder_path: str | Path) -> None:
266266
self.write_Power_VRES(cs.dPower_VRES, folder_path)
267267
if hasattr(cs, "dPower_VRESProfiles"):
268268
self.write_Power_VRESProfiles(cs.dPower_VRESProfiles, folder_path)
269+
if hasattr(cs, "dPower_ImportExport") and cs.dPower_ImportExport is not None:
270+
self.write_Power_ImportExport(cs.dPower_ImportExport, folder_path)
269271
self.write_Power_WeightsK(cs.dPower_WeightsK, folder_path)
270272
self.write_Power_WeightsRP(cs.dPower_WeightsRP, folder_path)
271273

SQLiteWriter.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ def add_solver_statistics_to_sqlite(filename: str, results, work_units=None) ->
9393
time_val = solver_info.time
9494
if time_val is not None and str(type(time_val)) != "<class 'pyomo.opt.results.container.UndefinedData'>":
9595
stats['solver_time'] = float(time_val)
96-
except:
96+
except Exception:
9797
pass
9898

9999
# Get problem statistics

Utilities.py

Lines changed: 43 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import typing
12
from typing import Literal, Dict
23

34
import numpy as np
@@ -6,6 +7,8 @@
67

78
from InOutModule.printer import Printer
89

10+
from InOutModule.CaseStudy import CaseStudy
11+
912
printer = Printer.getInstance()
1013

1114

@@ -170,7 +173,7 @@ def _pivot_technologies(df, value_column, index_cols=None):
170173

171174
# Try to merge with Power_VRES data
172175
if (hasattr(case_study, 'dPower_VRES') and case_study.dPower_VRES is not None and
173-
'vres_df' in locals() and len(vres_df) > 0):
176+
vres_with_profiles is not None and len(vres_df) > 0):
174177
inflows_with_vres = pd.merge(
175178
inflows_df,
176179
vres_df[['g', 'tec', 'i', 'ExisUnits', 'EnableInvest', 'MaxInvest']],
@@ -272,8 +275,9 @@ def _sum_technology_columns(df: pd.DataFrame) -> pd.DataFrame:
272275
return result_df
273276

274277

275-
def _run_kmedoids_clustering(pivot_df: pd.DataFrame, k: int, rp_length: int):
278+
def _run_kmedoids_clustering(pivot_df: pd.DataFrame, k: int, rp_length: int, solver: str = None, verbose: bool = False):
276279
"""Run k-medoids clustering using tsam."""
280+
printer = Printer.getInstance()
277281

278282
# Prepare data for tsam
279283
pivot_df_sorted = pivot_df.sort_values('k')
@@ -284,21 +288,25 @@ def _run_kmedoids_clustering(pivot_df: pd.DataFrame, k: int, rp_length: int):
284288
# Drop grouping columns and set datetime index
285289
clustering_data = pivot_df_sorted.drop(columns=['scenario', 'rp', 'k']).set_index('datetime')
286290

287-
print(f" Running k-medoids with {k} clusters, {rp_length} hours/period, {len(clustering_data)} total hours")
291+
if verbose:
292+
printer.information(f" Running k-medoids with {k} clusters, {rp_length} hours/period, {len(clustering_data)} total hours")
288293

289294
# Run clustering
290-
aggregation = tsam.TimeSeriesAggregation(
291-
clustering_data,
295+
tsam_kwargs = dict(
292296
noTypicalPeriods=k,
293297
hoursPerPeriod=rp_length,
294298
clusterMethod='k_medoids',
295299
rescaleClusterPeriods=False,
296-
solver="gurobi"
297300
)
301+
if solver is not None:
302+
tsam_kwargs['solver'] = solver
303+
304+
aggregation = tsam.TimeSeriesAggregation(clustering_data, **tsam_kwargs)
298305

299306
typical_periods = aggregation.createTypicalPeriods()
300-
print(f" Clustering completed. Created {len(typical_periods)} typical periods.")
301-
print(f" Cluster center indices (medoids): {aggregation.clusterCenterIndices}")
307+
if verbose:
308+
printer.information(f" Clustering completed. Created {len(typical_periods)} typical periods.")
309+
printer.information(f" Cluster center indices (medoids): {aggregation.clusterCenterIndices}")
302310

303311
return aggregation
304312

@@ -386,8 +394,9 @@ def _build_scenario_weights_and_indices(aggregation, scenario: str, rp_length: i
386394
return weights_rp, weights_k, hindex
387395

388396

389-
def _update_casestudy_with_scenarios(case_study, all_processed_data: Dict):
397+
def _update_casestudy_with_scenarios(case_study, all_processed_data: Dict, verbose: bool = False):
390398
"""Update CaseStudy with aggregated data, maintaining original index structures."""
399+
printer = Printer.getInstance()
391400

392401
# Collect all data across scenarios
393402
all_demand_data = []
@@ -405,45 +414,54 @@ def _update_casestudy_with_scenarios(case_study, all_processed_data: Dict):
405414
all_weights_k_data.extend(scenario_data['weights_k'])
406415
all_hindex_data.extend(scenario_data['hindex'])
407416

408-
print(f"Updating CaseStudy with combined data:")
417+
if verbose:
418+
printer.information(f"Updating CaseStudy with combined data:")
409419

410420
if all_demand_data:
411421
demand_df = pd.DataFrame(all_demand_data)
412422
case_study.dPower_Demand = demand_df.set_index(['rp', 'k', 'i'])
413-
print(f" - Updated demand: {len(all_demand_data)} entries")
423+
if verbose:
424+
printer.information(f" - Updated demand: {len(all_demand_data)} entries")
414425

415426
if all_vres_data:
416427
vres_df = pd.DataFrame(all_vres_data)
417428
case_study.dPower_VRESProfiles = vres_df.set_index(['rp', 'k', 'g'])
418-
print(f" - Updated VRES profiles: {len(all_vres_data)} entries")
429+
if verbose:
430+
printer.information(f" - Updated VRES profiles: {len(all_vres_data)} entries")
419431

420432
if all_inflows_data:
421433
inflows_df = pd.DataFrame(all_inflows_data)
422434
case_study.dPower_Inflows = inflows_df.set_index(['rp', 'k', 'g'])
423-
print(f" - Updated inflows: {len(all_inflows_data)} entries")
435+
if verbose:
436+
printer.information(f" - Updated inflows: {len(all_inflows_data)} entries")
424437

425438
if all_weights_rp_data:
426439
weights_rp_df = pd.DataFrame(all_weights_rp_data)
427440
case_study.dPower_WeightsRP = weights_rp_df.set_index(['rp'])
428-
print(f" - Updated RP weights: {len(all_weights_rp_data)} entries")
441+
if verbose:
442+
printer.information(f" - Updated RP weights: {len(all_weights_rp_data)} entries")
429443

430444
if all_weights_k_data:
431445
weights_k_df = pd.DataFrame(all_weights_k_data)
432446
case_study.dPower_WeightsK = weights_k_df.set_index(['k'])
433-
print(f" - Updated K weights: {len(all_weights_k_data)} entries")
447+
if verbose:
448+
printer.information(f" - Updated K weights: {len(all_weights_k_data)} entries")
434449

435450
if all_hindex_data:
436451
hindex_df = pd.DataFrame(all_hindex_data)
437452
case_study.dPower_Hindex = hindex_df.set_index(['p', 'rp', 'k'])
438-
print(f" - Updated Hindex: {len(all_hindex_data)} entries")
453+
if verbose:
454+
printer.information(f" - Updated Hindex: {len(all_hindex_data)} entries")
439455

440-
print("CaseStudy update completed successfully!")
456+
if verbose:
457+
printer.information("CaseStudy update completed successfully!")
441458

442459

443460
def get_kmedoids_representative_periods(case_study, number_rps: int, rp_length: int = 24,
444461
cluster_strategy: Literal["aggregated", "disaggregated"] = "aggregated",
445462
capacity_normalization: Literal["installed", "maxInvestment"] = "maxInvestment",
446-
sum_production: bool = False, verbose: bool = False) -> dict[str, tsam.TimeSeriesAggregation]:
463+
sum_production: bool = False, solver: str = "gurobi",
464+
verbose: bool = False) -> dict[str, tsam.TimeSeriesAggregation]:
447465
"""
448466
Get the representative periods using k-medoids temporal aggregation. Does not modify the original CaseStudy.
449467
Each scenario from dGlobal_Scenarios is processed independently.
@@ -454,6 +472,7 @@ def get_kmedoids_representative_periods(case_study, number_rps: int, rp_length:
454472
:param cluster_strategy: "aggregated" (sum across buses) or "disaggregated" (keep buses separate)
455473
:param capacity_normalization: "installed" or "maxInvestment" for VRES capacity factor weighting
456474
:param sum_production: If True, sum all technologies into single production column
475+
:param solver: Solver to use for k-medoids clustering (e.g. "gurobi", "glpk"). Defaults to "gurobi".
457476
:param verbose: If True, print detailed processing information
458477
459478
:return: TSAM TimeSeriesAggregation object with representative periods for each scenario
@@ -484,7 +503,7 @@ def get_kmedoids_representative_periods(case_study, number_rps: int, rp_length:
484503
printer.information(f"Prepared {len(pivot_df)} time periods for clustering") if verbose else None
485504

486505
printer.information(f"Running k-medoids clustering (k={number_rps}, rp_length={rp_length})") if verbose else None
487-
aggregation_result = _run_kmedoids_clustering(pivot_df, number_rps, rp_length)
506+
aggregation_result = _run_kmedoids_clustering(pivot_df, number_rps, rp_length, solver=solver, verbose=verbose)
488507

489508
printer.information(f"Aggregation result for scenario {scenario} received after {aggregation_result.clusteringDuration} seconds") if verbose else None
490509
all_scenario_results[scenario] = aggregation_result
@@ -496,7 +515,7 @@ def apply_representative_periods(
496515
aggregation: dict[str, tsam.TimeSeriesAggregation],
497516
rp_length: int = 24,
498517
inplace: bool = False,
499-
verbose: bool = False) -> None:
518+
verbose: bool = False) -> typing.Optional[CaseStudy]:
500519
"""
501520
Apply precomputed representative periods to a CaseStudy object.
502521
Each scenario from dGlobal_Scenarios is processed independently.
@@ -539,7 +558,7 @@ def apply_representative_periods(
539558
printer.information(f"Scenario {scenario} completed successfully") if verbose else None
540559

541560
# Update CaseStudy with aggregated data
542-
_update_casestudy_with_scenarios(aggregated_case_study, all_processed_data)
561+
_update_casestudy_with_scenarios(aggregated_case_study, all_processed_data, verbose=verbose)
543562

544563
printer.information(f"\nAll scenarios have been processed and combined successfully!") if verbose else None
545564
if not inplace:
@@ -555,6 +574,7 @@ def apply_kmedoids_aggregation(
555574
cluster_strategy: Literal["aggregated", "disaggregated"] = "aggregated",
556575
capacity_normalization: Literal["installed", "maxInvestment"] = "maxInvestment",
557576
sum_production: bool = False,
577+
solver: str = "gurobi",
558578
inplace: bool = False,
559579
verbose: bool = False):
560580
"""
@@ -567,6 +587,7 @@ def apply_kmedoids_aggregation(
567587
:param cluster_strategy: "aggregated" (sum across buses) or "disaggregated" (keep buses separate)
568588
:param capacity_normalization: "installed" or "maxInvestment" for VRES capacity factor weighting
569589
:param sum_production: If True, sum all technologies into single production column
590+
:param solver: Solver to use for k-medoids clustering (e.g. "gurobi", "glpk"). Defaults to "gurobi".
570591
:param inplace: If True, modify the original CaseStudy; otherwise, return a new one
571592
:param verbose: If True, print detailed processing information
572593
@@ -581,6 +602,7 @@ def apply_kmedoids_aggregation(
581602
cluster_strategy=cluster_strategy,
582603
capacity_normalization=capacity_normalization,
583604
sum_production=sum_production,
605+
solver=solver,
584606
verbose=verbose
585607
)
586608

printer.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22

33
import datetime
44

5-
import pyomo
65
from rich.console import Console
76
from rich.markup import escape
87

@@ -187,7 +186,7 @@ def information(self, text: str, prefix: str = "", hard_wrap_chars: str = None):
187186
self._log(f"{prefix}{text}")
188187
return None
189188

190-
def linear_expression(self, expr: pyomo.core.expr.numeric_expr.LinearExpression) -> None:
189+
def linear_expression(self, expr) -> None:
191190
"""
192191
Pretty-prints a linear expression to the console and logs it to the logfile if one is set.
193192

0 commit comments

Comments
 (0)