1+ import typing
12from typing import Literal , Dict
23
34import numpy as np
67
78from InOutModule .printer import Printer
89
10+ from InOutModule .CaseStudy import CaseStudy
11+
912printer = Printer .getInstance ()
1013
1114
@@ -170,7 +173,7 @@ def _pivot_technologies(df, value_column, index_cols=None):
170173
171174 # Try to merge with Power_VRES data
172175 if (hasattr (case_study , 'dPower_VRES' ) and case_study .dPower_VRES is not None and
173- 'vres_df' in locals () and len (vres_df ) > 0 ):
176+ vres_with_profiles is not None and len (vres_df ) > 0 ):
174177 inflows_with_vres = pd .merge (
175178 inflows_df ,
176179 vres_df [['g' , 'tec' , 'i' , 'ExisUnits' , 'EnableInvest' , 'MaxInvest' ]],
@@ -272,8 +275,9 @@ def _sum_technology_columns(df: pd.DataFrame) -> pd.DataFrame:
272275 return result_df
273276
274277
275- def _run_kmedoids_clustering (pivot_df : pd .DataFrame , k : int , rp_length : int ):
278+ def _run_kmedoids_clustering (pivot_df : pd .DataFrame , k : int , rp_length : int , solver : str = None , verbose : bool = False ):
276279 """Run k-medoids clustering using tsam."""
280+ printer = Printer .getInstance ()
277281
278282 # Prepare data for tsam
279283 pivot_df_sorted = pivot_df .sort_values ('k' )
@@ -284,21 +288,25 @@ def _run_kmedoids_clustering(pivot_df: pd.DataFrame, k: int, rp_length: int):
284288 # Drop grouping columns and set datetime index
285289 clustering_data = pivot_df_sorted .drop (columns = ['scenario' , 'rp' , 'k' ]).set_index ('datetime' )
286290
287- print (f" Running k-medoids with { k } clusters, { rp_length } hours/period, { len (clustering_data )} total hours" )
291+ if verbose :
292+ printer .information (f" Running k-medoids with { k } clusters, { rp_length } hours/period, { len (clustering_data )} total hours" )
288293
289294 # Run clustering
290- aggregation = tsam .TimeSeriesAggregation (
291- clustering_data ,
295+ tsam_kwargs = dict (
292296 noTypicalPeriods = k ,
293297 hoursPerPeriod = rp_length ,
294298 clusterMethod = 'k_medoids' ,
295299 rescaleClusterPeriods = False ,
296- solver = "gurobi"
297300 )
301+ if solver is not None :
302+ tsam_kwargs ['solver' ] = solver
303+
304+ aggregation = tsam .TimeSeriesAggregation (clustering_data , ** tsam_kwargs )
298305
299306 typical_periods = aggregation .createTypicalPeriods ()
300- print (f" Clustering completed. Created { len (typical_periods )} typical periods." )
301- print (f" Cluster center indices (medoids): { aggregation .clusterCenterIndices } " )
307+ if verbose :
308+ printer .information (f" Clustering completed. Created { len (typical_periods )} typical periods." )
309+ printer .information (f" Cluster center indices (medoids): { aggregation .clusterCenterIndices } " )
302310
303311 return aggregation
304312
@@ -386,8 +394,9 @@ def _build_scenario_weights_and_indices(aggregation, scenario: str, rp_length: i
386394 return weights_rp , weights_k , hindex
387395
388396
389- def _update_casestudy_with_scenarios (case_study , all_processed_data : Dict ):
397+ def _update_casestudy_with_scenarios (case_study , all_processed_data : Dict , verbose : bool = False ):
390398 """Update CaseStudy with aggregated data, maintaining original index structures."""
399+ printer = Printer .getInstance ()
391400
392401 # Collect all data across scenarios
393402 all_demand_data = []
@@ -405,45 +414,54 @@ def _update_casestudy_with_scenarios(case_study, all_processed_data: Dict):
405414 all_weights_k_data .extend (scenario_data ['weights_k' ])
406415 all_hindex_data .extend (scenario_data ['hindex' ])
407416
408- print (f"Updating CaseStudy with combined data:" )
417+ if verbose :
418+ printer .information (f"Updating CaseStudy with combined data:" )
409419
410420 if all_demand_data :
411421 demand_df = pd .DataFrame (all_demand_data )
412422 case_study .dPower_Demand = demand_df .set_index (['rp' , 'k' , 'i' ])
413- print (f" - Updated demand: { len (all_demand_data )} entries" )
423+ if verbose :
424+ printer .information (f" - Updated demand: { len (all_demand_data )} entries" )
414425
415426 if all_vres_data :
416427 vres_df = pd .DataFrame (all_vres_data )
417428 case_study .dPower_VRESProfiles = vres_df .set_index (['rp' , 'k' , 'g' ])
418- print (f" - Updated VRES profiles: { len (all_vres_data )} entries" )
429+ if verbose :
430+ printer .information (f" - Updated VRES profiles: { len (all_vres_data )} entries" )
419431
420432 if all_inflows_data :
421433 inflows_df = pd .DataFrame (all_inflows_data )
422434 case_study .dPower_Inflows = inflows_df .set_index (['rp' , 'k' , 'g' ])
423- print (f" - Updated inflows: { len (all_inflows_data )} entries" )
435+ if verbose :
436+ printer .information (f" - Updated inflows: { len (all_inflows_data )} entries" )
424437
425438 if all_weights_rp_data :
426439 weights_rp_df = pd .DataFrame (all_weights_rp_data )
427440 case_study .dPower_WeightsRP = weights_rp_df .set_index (['rp' ])
428- print (f" - Updated RP weights: { len (all_weights_rp_data )} entries" )
441+ if verbose :
442+ printer .information (f" - Updated RP weights: { len (all_weights_rp_data )} entries" )
429443
430444 if all_weights_k_data :
431445 weights_k_df = pd .DataFrame (all_weights_k_data )
432446 case_study .dPower_WeightsK = weights_k_df .set_index (['k' ])
433- print (f" - Updated K weights: { len (all_weights_k_data )} entries" )
447+ if verbose :
448+ printer .information (f" - Updated K weights: { len (all_weights_k_data )} entries" )
434449
435450 if all_hindex_data :
436451 hindex_df = pd .DataFrame (all_hindex_data )
437452 case_study .dPower_Hindex = hindex_df .set_index (['p' , 'rp' , 'k' ])
438- print (f" - Updated Hindex: { len (all_hindex_data )} entries" )
453+ if verbose :
454+ printer .information (f" - Updated Hindex: { len (all_hindex_data )} entries" )
439455
440- print ("CaseStudy update completed successfully!" )
456+ if verbose :
457+ printer .information ("CaseStudy update completed successfully!" )
441458
442459
443460def get_kmedoids_representative_periods (case_study , number_rps : int , rp_length : int = 24 ,
444461 cluster_strategy : Literal ["aggregated" , "disaggregated" ] = "aggregated" ,
445462 capacity_normalization : Literal ["installed" , "maxInvestment" ] = "maxInvestment" ,
446- sum_production : bool = False , verbose : bool = False ) -> dict [str , tsam .TimeSeriesAggregation ]:
463+ sum_production : bool = False , solver : str = "gurobi" ,
464+ verbose : bool = False ) -> dict [str , tsam .TimeSeriesAggregation ]:
447465 """
448466 Get the representative periods using k-medoids temporal aggregation. Does not modify the original CaseStudy.
449467 Each scenario from dGlobal_Scenarios is processed independently.
@@ -454,6 +472,7 @@ def get_kmedoids_representative_periods(case_study, number_rps: int, rp_length:
454472 :param cluster_strategy: "aggregated" (sum across buses) or "disaggregated" (keep buses separate)
455473 :param capacity_normalization: "installed" or "maxInvestment" for VRES capacity factor weighting
456474 :param sum_production: If True, sum all technologies into single production column
475+ :param solver: Solver to use for k-medoids clustering (e.g. "gurobi", "glpk"). Defaults to "gurobi".
457476 :param verbose: If True, print detailed processing information
458477
459478 :return: Dictionary mapping each scenario name to its tsam TimeSeriesAggregation object with representative periods
@@ -484,7 +503,7 @@ def get_kmedoids_representative_periods(case_study, number_rps: int, rp_length:
484503 printer .information (f"Prepared { len (pivot_df )} time periods for clustering" ) if verbose else None
485504
486505 printer .information (f"Running k-medoids clustering (k={ number_rps } , rp_length={ rp_length } )" ) if verbose else None
487- aggregation_result = _run_kmedoids_clustering (pivot_df , number_rps , rp_length )
506+ aggregation_result = _run_kmedoids_clustering (pivot_df , number_rps , rp_length , solver = solver , verbose = verbose )
488507
489508 printer .information (f"Aggregation result for scenario { scenario } received after { aggregation_result .clusteringDuration } seconds" ) if verbose else None
490509 all_scenario_results [scenario ] = aggregation_result
@@ -496,7 +515,7 @@ def apply_representative_periods(
496515 aggregation : dict [str , tsam .TimeSeriesAggregation ],
497516 rp_length : int = 24 ,
498517 inplace : bool = False ,
499- verbose : bool = False ) -> None :
518+ verbose : bool = False ) -> typing . Optional [ CaseStudy ] :
500519 """
501520 Apply precomputed representative periods to a CaseStudy object.
502521 Each scenario from dGlobal_Scenarios is processed independently.
@@ -539,7 +558,7 @@ def apply_representative_periods(
539558 printer .information (f"Scenario { scenario } completed successfully" ) if verbose else None
540559
541560 # Update CaseStudy with aggregated data
542- _update_casestudy_with_scenarios (aggregated_case_study , all_processed_data )
561+ _update_casestudy_with_scenarios (aggregated_case_study , all_processed_data , verbose = verbose )
543562
544563 printer .information (f"\n All scenarios have been processed and combined successfully!" ) if verbose else None
545564 if not inplace :
@@ -555,6 +574,7 @@ def apply_kmedoids_aggregation(
555574 cluster_strategy : Literal ["aggregated" , "disaggregated" ] = "aggregated" ,
556575 capacity_normalization : Literal ["installed" , "maxInvestment" ] = "maxInvestment" ,
557576 sum_production : bool = False ,
577+ solver : str = "gurobi" ,
558578 inplace : bool = False ,
559579 verbose : bool = False ):
560580 """
@@ -567,6 +587,7 @@ def apply_kmedoids_aggregation(
567587 :param cluster_strategy: "aggregated" (sum across buses) or "disaggregated" (keep buses separate)
568588 :param capacity_normalization: "installed" or "maxInvestment" for VRES capacity factor weighting
569589 :param sum_production: If True, sum all technologies into single production column
590+ :param solver: Solver to use for k-medoids clustering (e.g. "gurobi", "glpk"). Defaults to "gurobi".
570591 :param inplace: If True, modify the original CaseStudy; otherwise, return a new one
571592 :param verbose: If True, print detailed processing information
572593
@@ -581,6 +602,7 @@ def apply_kmedoids_aggregation(
581602 cluster_strategy = cluster_strategy ,
582603 capacity_normalization = capacity_normalization ,
583604 sum_production = sum_production ,
605+ solver = solver ,
584606 verbose = verbose
585607 )
586608
0 commit comments