ISPyPSA API
The ISPyPSA API is the set of Python functions used to carry out the ISPyPSA modelling workflow. When the CLI is used the API runs in the background to carry out the workflow.
Advanced users wanting more control or flexibility from the modelling workflow might want to use ISPyPSA directly through the API. An example of how the default ISPyPSA workflow is implemented using the API is provided below. We suggest API users start by running and understanding the default workflow and then adapting it for their use case.
Each API function is also documented individually after the workflow example.
API default workflow
Below is an example of the default ISPyPSA workflow implemented using the Python API. This is the same workflow which the CLI follows. To use this workflow you simply need to edit the code to point at an ispypsa_config.yaml file of your choice, then run the Python script.
"""Default ISPyPSA workflow implemented via the Python API.

This mirrors the workflow the CLI runs: build a local cache of parsed IASR
workbook tables, create ISPyPSA input templates, translate them into a
PyPSA-friendly format, solve a capacity expansion model, then re-use the
solved network for operational modelling. Point ``config_path`` below at
your own ``ispypsa_config.yaml`` before running.
"""
from pathlib import Path
from ispypsa.config import load_config
from ispypsa.data_fetch import read_csvs, write_csvs
from ispypsa.iasr_table_caching import build_local_cache
from ispypsa.logging import configure_logging
from ispypsa.plotting import (
create_plot_suite,
generate_results_website,
save_plots,
)
from ispypsa.pypsa_build import (
build_pypsa_network,
save_pypsa_network,
update_network_timeseries,
)
from ispypsa.results import (
extract_regions_and_zones_mapping,
extract_tabular_results,
)
from ispypsa.templater import (
create_ispypsa_inputs_template,
load_manually_extracted_tables,
)
from ispypsa.translator import (
create_pypsa_friendly_inputs,
create_pypsa_friendly_timeseries_inputs,
)
# Load and validate the model config (YAML -> ModelConfig).
config_path = Path("ispypsa_config.yaml")
config = load_config(config_path)
# Load base paths from config.
parsed_workbook_cache = Path(config.paths.parsed_workbook_cache)
# Trace data is grouped by ISP dataset year, e.g. an "isp_2024" subdirectory.
parsed_traces_directory = (
Path(config.paths.parsed_traces_directory) / f"isp_{config.trace_data.dataset_year}"
)
workbook_path = Path(config.paths.workbook_path)
run_directory = Path(config.paths.run_directory)
# Construct full paths from base paths. All run artefacts live under
# <run_directory>/<ispypsa_run_name>/.
ispypsa_input_tables_directory = (
run_directory / config.paths.ispypsa_run_name / "ispypsa_inputs"
)
pypsa_friendly_inputs_location = (
run_directory / config.paths.ispypsa_run_name / "pypsa_friendly"
)
# The capacity expansion and operational phases keep separate timeseries inputs.
capacity_expansion_timeseries_location = (
pypsa_friendly_inputs_location / "capacity_expansion_timeseries"
)
operational_timeseries_location = (
pypsa_friendly_inputs_location / "operational_timeseries"
)
pypsa_outputs_directory = run_directory / config.paths.ispypsa_run_name / "outputs"
capacity_expansion_tabular_results_directory = (
pypsa_outputs_directory / "capacity_expansion_tables"
)
capacity_expansion_plot_results_directory = (
pypsa_outputs_directory / "capacity_expansion_plots"
)
operational_tabular_results_directory = pypsa_outputs_directory / "operational_tables"
operational_plot_results_directory = pypsa_outputs_directory / "operational_plots"
# Create output directories if they don't exist.
parsed_workbook_cache.mkdir(parents=True, exist_ok=True)
ispypsa_input_tables_directory.mkdir(parents=True, exist_ok=True)
pypsa_friendly_inputs_location.mkdir(parents=True, exist_ok=True)
capacity_expansion_timeseries_location.mkdir(parents=True, exist_ok=True)
operational_timeseries_location.mkdir(parents=True, exist_ok=True)
pypsa_outputs_directory.mkdir(parents=True, exist_ok=True)
capacity_expansion_tabular_results_directory.mkdir(parents=True, exist_ok=True)
capacity_expansion_plot_results_directory.mkdir(parents=True, exist_ok=True)
operational_tabular_results_directory.mkdir(parents=True, exist_ok=True)
operational_plot_results_directory.mkdir(parents=True, exist_ok=True)
# Default logging: warnings to console, info-level to the ISPyPSA.log file.
configure_logging()
# Build the local cache of parsed workbook CSVs from the IASR workbook.
build_local_cache(parsed_workbook_cache, workbook_path, config.iasr_workbook_version)
# Load ISP IASR data tables from the cache.
iasr_tables = read_csvs(parsed_workbook_cache)
# Some tables can't be parsed from the workbook; ISPyPSA ships them pre-extracted.
manually_extracted_tables = load_manually_extracted_tables(config.iasr_workbook_version)
# Create ISPyPSA inputs from IASR tables.
ispypsa_tables = create_ispypsa_inputs_template(
config.scenario,
config.network.nodes.regional_granularity,
iasr_tables,
manually_extracted_tables,
config.filter_by_nem_regions,
config.filter_by_isp_sub_regions,
)
write_csvs(ispypsa_tables, ispypsa_input_tables_directory)
# Suggested stage of user interaction:
# At this stage of the workflow the user can modify ispypsa input files, either
# manually or programmatically, to run alternative scenarios using the template
# generated from the chosen ISP scenario.
# Translate ISPyPSA format to a PyPSA friendly format.
pypsa_friendly_input_tables = create_pypsa_friendly_inputs(config, ispypsa_tables)
# Create timeseries inputs and snapshots for the capacity expansion phase; the
# returned snapshots are stored alongside the other PyPSA friendly tables.
pypsa_friendly_input_tables["snapshots"] = create_pypsa_friendly_timeseries_inputs(
config,
"capacity_expansion",
ispypsa_tables,
pypsa_friendly_input_tables["generators"],
parsed_traces_directory,
capacity_expansion_timeseries_location,
)
write_csvs(pypsa_friendly_input_tables, pypsa_friendly_inputs_location)
# Build a PyPSA network object.
network = build_pypsa_network(
pypsa_friendly_input_tables,
capacity_expansion_timeseries_location,
)
# Solve for least cost operation/expansion.
# Never use network.optimize() as this will remove custom constraints.
network.optimize.solve_model(solver_name=config.solver)
# Save capacity expansion results (network as NetCDF, tabular results as CSVs).
save_pypsa_network(network, pypsa_outputs_directory, "capacity_expansion")
capacity_expansion_results = extract_tabular_results(network, ispypsa_tables)
# The regions/zones mapping is needed by the plotting and website steps below.
capacity_expansion_results["regions_and_zones_mapping"] = (
extract_regions_and_zones_mapping(ispypsa_tables)
)
write_csvs(capacity_expansion_results, capacity_expansion_tabular_results_directory)
# Create and save capacity expansion plots (interactive HTML plus CSV data).
capacity_expansion_plots = create_plot_suite(capacity_expansion_results)
save_plots(capacity_expansion_plots, capacity_expansion_plot_results_directory)
# Generate capacity expansion results website (single static HTML viewer).
generate_results_website(
capacity_expansion_plots,
capacity_expansion_plot_results_directory,
pypsa_outputs_directory,
site_name=f"{config.paths.ispypsa_run_name}",
output_filename="capacity_expansion_results_viewer.html",
subtitle="Capacity Expansion Analysis",
regions_and_zones_mapping=capacity_expansion_results["regions_and_zones_mapping"],
)
# Operational modelling extension: re-run the solved network against
# operational-phase snapshots with capacities fixed at their optimal values.
operational_snapshots = create_pypsa_friendly_timeseries_inputs(
config,
"operational",
ispypsa_tables,
pypsa_friendly_input_tables["generators"],
parsed_traces_directory,
operational_timeseries_location,
)
write_csvs(
{"operational_snapshots": operational_snapshots}, pypsa_friendly_inputs_location
)
# Swap the network's time series data over to the operational snapshots/traces.
update_network_timeseries(
network,
pypsa_friendly_input_tables,
operational_snapshots,
operational_timeseries_location,
)
# Lock in the capacities found by the capacity expansion solve.
network.optimize.fix_optimal_capacities()
# Never use network.optimize() as this will remove custom constraints.
network.optimize.optimize_with_rolling_horizon(
horizon=config.temporal.operational.horizon,
overlap=config.temporal.operational.overlap,
)
save_pypsa_network(network, pypsa_outputs_directory, "operational")
# Extract and save operational results.
operational_results = extract_tabular_results(network, ispypsa_tables)
operational_results["regions_and_zones_mapping"] = extract_regions_and_zones_mapping(
ispypsa_tables
)
write_csvs(operational_results, operational_tabular_results_directory)
# Create and save operational plots.
operational_plots = create_plot_suite(operational_results)
save_plots(operational_plots, operational_plot_results_directory)
# Generate operational results website.
generate_results_website(
operational_plots,
operational_plot_results_directory,
pypsa_outputs_directory,
site_name=f"{config.paths.ispypsa_run_name}",
output_filename="operational_results_viewer.html",
subtitle="Operational Analysis",
regions_and_zones_mapping=operational_results["regions_and_zones_mapping"],
)
Configuration & Logging
ispypsa.config.load_config
Load and validate configuration from a YAML file.
Examples:
Perform required imports.
Load the configuration from a YAML file.
Access configuration values.
Parameters:
-
config_path(str | Path) –Path to the YAML configuration file
Returns:
-
ModelConfig(ModelConfig) –Validated configuration object
Raises:
-
ValidationError–If the configuration is invalid
-
FileNotFoundError–If the config file doesn't exist
-
YAMLError–If the YAML is malformed
Source code in src\ispypsa\config\loader.py
ispypsa.logging.configure_logging
configure_logging(console: bool = True, console_level: int = logging.WARNING, file: bool = True, file_level: int = logging.INFO, log_file: str = 'ISPyPSA.log') -> None
Configures ISPyPSA logging
Examples:
Perform required imports.
Configure logging with default settings (console warnings, file info).
Configure logging with custom settings.
>>> configure_logging(
... console=True,
... console_level=logging.INFO,
... file=True,
... file_level=logging.DEBUG,
... log_file="my_run.log"
... )
Disable file logging.
Parameters:
-
console(bool, default:True) –Whether to log to the console. Defaults to True.
-
console_level(int, default:WARNING) –Level of the console logging. Defaults to logging.WARNING.
-
file(bool, default:True) –Whether to log to a log file. Defaults to True.
-
file_level(int, default:INFO) –Level of the file logging. Defaults to logging.INFO.
-
log_file(str, default:'ISPyPSA.log') –Name of the logging file. Defaults to "ISPyPSA.log".
Returns:
-
None–None
Source code in src\ispypsa\logging.py
Data Fetching & Caching
ispypsa.iasr_table_caching.build_local_cache
build_local_cache(cache_path: Path | str, workbook_path: Path | str, iasr_workbook_version: str) -> None
Uses isp-workbook-parser to build a local cache of parsed workbook CSVs
Examples:
Perform required imports.
Build the local cache of parsed workbook CSVs.
>>> build_local_cache(
... cache_path=Path("parsed_workbook_cache"),
... workbook_path=Path("path/to/ISP_Workbook.xlsx"),
... iasr_workbook_version="6.0"
... )
Parameters:
-
cache_path(Path | str) –Path that should be created for the local cache
-
workbook_path(Path | str) –Path to an ISP Assumptions Workbook that is supported by
isp-workbook-parser -
iasr_workbook_version(str) –str specifying the version of the workbook being used.
Returns:
-
None–None
Source code in src\ispypsa\iasr_table_caching\local_cache.py
ispypsa.data_fetch.read_csvs
Read all the CSVs in a directory into a dictionary with filenames (without csv extension) as keys.
Examples:
Perform required imports.
Read CSVs from a directory containing parsed workbook tables.
Read CSVs from a directory containing ISPyPSA input tables.
Access a specific table from the dictionary.
Parameters:
-
directory(Path | str) –Path to directory to read CSVs from.
Returns:
-
dict[str:(DataFrame)]–dict[str, pd.DataFrame]: Dictionary with filenames (without .csv extension)
-
dict[str:(DataFrame)]–as keys and DataFrames as values.
Source code in src\ispypsa\data_fetch\csv_read_write.py
ispypsa.data_fetch.write_csvs
Write all pd.DataFrames in a dictionary with filenames as keys (without csv extension) to CSVs.
Examples:
Perform required imports.
Write ISPyPSA input tables to a directory.
Write PyPSA-friendly tables to a directory.
Write model results to a directory.
Parameters:
-
data_dict(dict[str:(DataFrame)]) –Dictionary of pd.DataFrames to write to csv files.
-
directory(Path | str) –Path to directory to save CSVs to.
Returns:
-
–
None
Source code in src\ispypsa\data_fetch\csv_read_write.py
ispypsa.data_fetch.fetch_workbook
Download ISP workbook file.
Downloads the ISP workbook for the specified version from the manifest to the specified file path.
Examples:
>>> fetch_workbook("6.0", "data/workbooks/isp_6.0.xlsx")
# Downloads ISP 6.0 workbook to data/workbooks/isp_6.0.xlsx
Parameters:
-
workbook_version–str Version string (e.g., "6.0")
-
save_path–Path | str Full path where the workbook file should be saved (e.g., "data/workbooks/6.0.xlsx")
Returns:
-
None–None
Raises:
-
FileNotFoundError–If the manifest file does not exist
-
HTTPError–If the download fails
Source code in src\ispypsa\data_fetch\download.py
Templating (ISPyPSA Input Creation)
ispypsa.templater.create_ispypsa_inputs_template
create_ispypsa_inputs_template(scenario: str, regional_granularity: str, iasr_tables: dict[str:(DataFrame)], manually_extracted_tables: dict[str:(DataFrame)], filter_to_nem_regions: list[str] = None, filter_to_isp_sub_regions: list[str] = None) -> dict[str:(pd.DataFrame)]
Creates a template set of ISPyPSA input tables.
Examples:
Perform required imports.
>>> from pathlib import Path
>>> from ispypsa.config import load_config
>>> from ispypsa.data_fetch import read_csvs, write_csvs
>>> from ispypsa.templater import load_manually_extracted_tables
>>> from ispypsa.templater import create_ispypsa_inputs_template
Tables previously extracted from IASR workbook using isp_workbook_parser are loaded.
Some tables can't be handled by isp_workbook_parser so ISPyPSA ships with the missing data.
Now a template can be created by specifying the ISP scenario to use and the spatial granularity of the model.
>>> ispypsa_inputs_template = create_ispypsa_inputs_template(
... scenario="Step Change",
... regional_granularity="sub_regions",
... iasr_tables=iasr_tables,
... manually_extracted_tables=manually_extracted_tables
... )
Write the template tables to a directory as CSVs.
Parameters:
-
scenario(str) –ISP scenario to generate template inputs based on.
-
regional_granularity(str) –the spatial granularity of the model template, "sub_regions", "nem_regions", or "single_region".
-
iasr_tables(dict[str:(DataFrame)]) –dictionary of dataframes providing the IASR input tables extracted using the
isp_workbook_parser. -
manually_extracted_tables(dict[str:(DataFrame)]) –dictionary of dataframes providing additional IASR tables that can't be parsed using
isp_workbook_parser -
filter_to_nem_regions(list[str], default:None) –Optional list of NEM region IDs (e.g., ['NSW', 'VIC']) to filter the template to. Cannot be specified together with filter_to_isp_sub_regions.
-
filter_to_isp_sub_regions(list[str], default:None) –Optional list of ISP sub-region IDs (e.g., ['CNSW', 'VIC', 'TAS']) to filter the template to. Cannot be specified together with filter_to_nem_regions.
Returns:
-
dict[str:(DataFrame)]–dictionary of dataframes in the
ISPyPSAformat
Raises:
-
ValueError–If both filter_to_nem_regions and filter_to_isp_sub_regions are provided
Source code in src\ispypsa\templater\create_template.py (lines 62–210)
ispypsa.templater.load_manually_extracted_tables
Retrieves the manually extracted template files for the IASR workbook version.
Some tables can't be handled by isp-workbook-parser so ISPyPSA ships with the
missing data pre-extracted for each supported workbook version.
Examples:
Perform required imports.
Load the manually extracted tables for the workbook version.
Access a specific table from the dictionary.
Parameters:
-
iasr_workbook_version(str) –str specifying which version of the workbook is being used to create the template.
Returns:
-
dict[str:(DataFrame)]–dict[str:
pd.DataFrame]
Source code in src\ispypsa\templater\manual_tables.py
Translation (PyPSA-Friendly Format)
ispypsa.translator.create_pypsa_friendly_inputs
create_pypsa_friendly_inputs(config: ModelConfig, ispypsa_tables: dict[str, DataFrame]) -> dict[str, pd.DataFrame]
Creates a set of tables for defining a PyPSA network from a set ISPyPSA tables.
Examples:
Perform required imports.
>>> from pathlib import Path
>>> from ispypsa.config import load_config
>>> from ispypsa.data_fetch import read_csvs, write_csvs
>>> from ispypsa.translator import create_pypsa_friendly_inputs
Load ISPyPSA model config file and input tables.
>>> config = load_config(Path("ispypsa_config.yaml"))
>>> ispypsa_input_tables = read_csvs(Path("ispypsa_inputs_directory"))
Make the PyPSA friendly inputs!
>>> pypsa_friendly_inputs = create_pypsa_friendly_inputs(
... config=config,
... ispypsa_tables=ispypsa_input_tables
... )
Write the resulting dataframes to CSVs.
Parameters:
-
config(ModelConfig) –ISPyPSA ispypsa.config.ModelConfig object (add link to config docs). -
ispypsa_tables(dict[str, DataFrame]) –dictionary of dataframes providing the
ISPyPSAinput tables. (add link to ispypsa input tables docs).
dictionary of dataframes in the `PyPSA` friendly format. (add link to
-
dict[str, DataFrame]–pypsa friendly format table docs)
Source code in src\ispypsa\translator\create_pypsa_friendly.py (lines 60–207)
ispypsa.translator.create_pypsa_friendly_snapshots
create_pypsa_friendly_snapshots(config: ModelConfig, model_phase: Literal['capacity_expansion', 'operational'], existing_generators: DataFrame | None = None, demand_traces: dict[str, DataFrame] | None = None, generator_traces: dict[str, DataFrame] | None = None) -> pd.DataFrame
Create a pd.DataFrame of PyPSA-compatible snapshots and associated investment periods for either the 'capacity_expansion' or 'operational' model phase.
This function also includes investment_periods for the 'operational' phase (even though they are not strictly required), because PyPSA expects both 'snapshots' and 'investment_periods' when overwriting an existing network.
Examples:
Perform required imports.
>>> from pathlib import Path
>>> from ispypsa.config import load_config
>>> from ispypsa.translator import create_pypsa_friendly_snapshots
Load the ISPyPSA configuration.
Create snapshots for capacity expansion modelling.
Create snapshots for operational modelling.
Parameters:
-
config(ModelConfig) –ispypsa.ModelConfig instance
-
model_phase(Literal['capacity_expansion', 'operational']) –string defining whether the snapshots are for the operational or capacity expansion phase of the modelling. This allows the correct temporal config inputs to be used from the ModelConfig instance.
-
existing_generators(DataFrame | None, default:None) –pd.DataFrame containing existing generators data, required if using named_representative_weeks with residual metrics (e.g., "residual-peak-demand", "residual-minimum-demand") (optional).
-
demand_traces(dict[str, DataFrame] | None, default:None) –dict[str, pd.DataFrame] with demand node names as keys and time series dataframes as values (optional, required for named_representative_weeks).
-
generator_traces(dict[str, DataFrame] | None, default:None) –dict[str, pd.DataFrame] with generator names as keys and time series dataframes as values (optional, required for residual metrics).
Returns:
-
DataFrame–A pd.DataFrame containing the columns 'investment_periods' (int) defining the investment period a modelled interval belongs to and 'snapshots' (datetime) defining each time interval modelled. 'investment_periods' periods are referred to by the year (financial or calendar) in which they begin.
Source code in src\ispypsa\translator\snapshots.py
ispypsa.translator.create_pypsa_friendly_timeseries_inputs
create_pypsa_friendly_timeseries_inputs(config: ModelConfig, model_phase: Literal['capacity_expansion', 'operational'], ispypsa_tables: dict[str, DataFrame], generators: DataFrame, parsed_traces_directory: Path, pypsa_friendly_timeseries_inputs_location: Path, snapshots: DataFrame | None = None) -> pd.DataFrame
Creates snapshots and timeseries data files in PyPSA friendly format for generation and demand.
-
First creates snapshots based on the temporal configuration, optionally using named_representative_weeks and/or representative_weeks if configured. If snapshots are provided, they are used instead of generating new ones.
-
a time series file is created for each wind and solar generator in the new_entrant_generators table (table in ispypsa_tables dict). The time series data is saved in parquet files in the 'solar_traces' and 'wind_traces' directories with the columns "snapshots" (datetime) and "p_max_pu" (float specifying availability in MW).
-
a time series file is created for each generator in the translated generators table (table in pypsa_inputs dict) containing the marginal costs for each generator in each snapshot. The time series data is saved in parquet files in the 'marginal_costs' directory with the columns "snapshots" (datetime) and "marginal_cost" (float specifying marginal cost in $/MWh).
-
a time series file is created for each model region specifying the load in that region (regions set by config.network.nodes.regional_granularity). The time series data is saved in parquet files in the 'demand_traces' directory with the columns "snapshots" (datetime) and "p_set" (float specifying load in MW).
Examples:
Perform required imports.
>>> from pathlib import Path
>>> from ispypsa.config import load_config
>>> from ispypsa.data_fetch import read_csvs
>>> from ispypsa.translator import (
... create_pypsa_friendly_inputs,
... create_pypsa_friendly_timeseries_inputs
... )
Load config and ISPyPSA input tables.
>>> config = load_config(Path("ispypsa_config.yaml"))
>>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))
Create PyPSA-friendly inputs to get the generators table.
Create timeseries inputs for capacity expansion modelling.
>>> snapshots = create_pypsa_friendly_timeseries_inputs(
... config,
... "capacity_expansion",
... ispypsa_tables,
... pypsa_friendly_inputs["generators"],
... Path("parsed_traces"),
... Path("pypsa_friendly/timeseries")
... )
Parameters:
-
config(ModelConfig) –ISPyPSA ModelConfig instance.
-
model_phase(Literal['capacity_expansion', 'operational']) –Either "capacity_expansion" or "operational". Determines which temporal config settings to use.
-
ispypsa_tables(dict[str, DataFrame]) –Dictionary of ISPyPSA input tables. Must contain ecaa_generators, new_entrant_generators, sub_regions tables, and fuel cost tables for fuel types present in the generator tables.
-
generators(DataFrame) –PyPSA-friendly generators DataFrame (from create_pypsa_friendly_inputs). Used for calculating dynamic marginal costs.
-
parsed_traces_directory(Path) –Path to trace data parsed using isp-trace-parser.
-
pypsa_friendly_timeseries_inputs_location(Path) –Path where timeseries data will be saved.
-
snapshots(DataFrame | None, default:None) –Optional pre-defined snapshots DataFrame. If provided, must contain 'snapshots' (datetime) and 'investment_periods' (int) columns.
Returns:
-
DataFrame–pd.DataFrame containing the snapshots used for the timeseries.
Source code in src\ispypsa\translator\create_pypsa_friendly.py (lines 210–375)
Model Building & Execution
ispypsa.pypsa_build.build_pypsa_network
build_pypsa_network(pypsa_friendly_tables: dict[str:(DataFrame)], path_to_pypsa_friendly_timeseries_data: Path)
Creates a pypsa.Network based on set of pypsa friendly input tables.
Examples:
Perform required imports.
>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs, write_csvs
>>> from ispypsa.pypsa_build import build_pypsa_network
Read in PyPSA friendly tables from CSV.
>>> pypsa_friendly_inputs = build_pypsa_network(
... pypsa_friendly_tables=pypsa_input_tables,
... path_to_pypsa_friendly_timeseries_data=Path("pypsa_friendly_timeseries_data")
... )
Then the model can be run in PyPSA
And the results saved to disk.
Parameters:
-
pypsa_friendly_tables(dict[str:(DataFrame)]) –dictionary of dataframes in the
PyPSAfriendly format. (add link to pypsa friendly format table docs) -
path_to_pypsa_friendly_timeseries_data(Path) –Path to PyPSA friendly time series data (add link to timeseries data docs).
Returns:
-
–
pypsa.Network: A PyPSA network object ready for optimisation.
Source code in src\ispypsa\pypsa_build\build.py
ispypsa.pypsa_build.update_network_timeseries
update_network_timeseries(network: Network, pypsa_friendly_input_tables: dict[str, DataFrame], snapshots: DataFrame, pypsa_friendly_timeseries_location: Path) -> None
Update the time series data in a pypsa.Network instance.
Designed to help convert capacity expansion network models into operational models but may also be useful in other circumstances, such as when running a capacity expansion model with different reference year cycles.
Examples:
>>> import pandas as pd
>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs
>>> from ispypsa.pypsa_build import update_network_timeseries
Get PyPSA friendly inputs (in particular these need to contain the generators and buses tables).
Get the snapshots for the updated time series data.
Get the pypsa.Network we want to update the time series data in.
Create a pd.DataFrame defining the set of snapshots (time intervals) to be used.
>>> update_network_timeseries(
... network,
... pypsa_friendly_input_tables,
... snapshots,
... Path("path/to/time/series/data/files")
... )
Parameters:
-
network(Network) –pypsa.Network which has set of generators, loads, and buses consistent with the updated time series data. i.e. if generator 'Y' exists in the existing network it also needs to exist in the updated time series data.
-
pypsa_friendly_input_tables(dict[str, DataFrame]) –dictionary of dataframes in the
PyPSAfriendly format. (add link to pypsa friendly format table docs) -
snapshots(DataFrame) –a pd.DataFrame containing the columns 'investment_periods' (int) defining the investment period a modelled interval belongs to and 'snapshots' (datetime) defining each time interval modelled. 'investment_periods' periods are referred to by the year (financial or calendar) in which they begin.
-
pypsa_friendly_timeseries_location(Path) –Path to PyPSA friendly time series data (add link to timeseries data docs).
Returns: None
Source code in src\ispypsa\pypsa_build\update.py
ispypsa.pypsa_build.save_pypsa_network
Save the optimised PyPSA network as a NetCDF file.
Examples:
Perform required imports.
After running the model optimisation, save the network.
>>> network.optimize.solve_model(solver_name="highs")
>>> save_pypsa_network(
... network,
... save_directory=Path("outputs"),
... save_name="capacity_expansion"
... )
# Saves to outputs/capacity_expansion.nc
Save operational model results.
>>> save_pypsa_network(
... network,
... save_directory=Path("outputs"),
... save_name="operational"
... )
# Saves to outputs/operational.nc
Parameters:
-
network(Network) –The solved PyPSA network object.
-
save_directory(Path) –Directory where the network file should be saved.
-
save_name(str) –Name for the saved file (without .nc extension).
Returns:
-
None–None
Source code in src\ispypsa\pypsa_build\save.py
Tabular Results Extraction
ispypsa.results.extract_tabular_results
extract_tabular_results(network: Network, ispypsa_tables: dict[str, DataFrame]) -> dict[str:(pd.DataFrame)]
Extract the results from the PyPSA network and return a dictionary of results.
Extracts generation expansion, transmission expansion, dispatch, demand, and transmission flow results from the solved PyPSA network.
Examples:
Perform required imports.
>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs, write_csvs
>>> from ispypsa.results import extract_tabular_results
Load ISPyPSA input tables (needed for regions mapping).
After solving the network, extract the results.
>>> network.optimize.solve_model(solver_name="highs")
>>> results = extract_tabular_results(network, ispypsa_tables)
Access specific result tables.
>>> generation_expansion = results["generation_expansion"]
>>> transmission_flows = results["transmission_flows"]
Write results to CSV files.
Parameters:
-
network(Network) –The PyPSA network object.
-
ispypsa_tables(dict[str, DataFrame]) –Dictionary of ISPyPSA input tables (needed for regions mapping).
Returns:
-
dict[str:(DataFrame)]–A dictionary of results with the file name as the key and the results as the value.
Source code in src\ispypsa\results\extract.py
Plotting
ispypsa.plotting.create_plot_suite
Create a suite of plots for ISPyPSA modelling results.
Works for both capacity expansion and operational model results.
Examples:
Perform required imports.
>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs
>>> from ispypsa.results import extract_tabular_results
>>> from ispypsa.plotting import create_plot_suite, save_plots
Extract tabular results from the solved network.
>>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))
>>> results = extract_tabular_results(network, ispypsa_tables)
Create the plot suite from the results.
Save the plots to disk.
Parameters:
-
results(dict[str, DataFrame]) –A dictionary of tabular results from the ISPyPSA model. Should contain: - transmission_expansion - transmission_flows - nem_region_transmission_flows - isp_sub_region_transmission_flows - regions_and_zones_mapping - generator_dispatch - generation_expansion - demand
Returns:
-
dict[Path, dict]–A dictionary of plots with the file path as the key and the plot as the value.
Source code in src\ispypsa\plotting\plot.py
ispypsa.plotting.save_plots
Save a suite of Plotly plots and their underlying data to the plots directory.
All plots are saved as interactive HTML files with accompanying CSV data files.
Examples:
Perform required imports.
Create plots from results (see create_plot_suite for how to get results).
Save all plots to a directory.
>>> save_plots(plots, Path("outputs/capacity_expansion_plots"))
# Creates HTML files like:
# outputs/capacity_expansion_plots/transmission/aggregate_capacity.html
# outputs/capacity_expansion_plots/transmission/aggregate_capacity.csv
# outputs/capacity_expansion_plots/generation.html
# outputs/capacity_expansion_plots/generation.csv
Parameters:
-
charts(dict[Path, dict]) –A dictionary with file paths as keys and dicts with "plot" and "data" as values.
-
base_path(Path) –The path to the directory where the plots are saved.
Returns:
-
None–None
Source code in src\ispypsa\plotting\plot.py
ispypsa.plotting.generate_results_website
generate_results_website(plots: Dict[Path, dict], plots_dir: Path, output_dir: Path, site_name: str = 'ISPyPSA Results', output_filename: str = 'results_viewer.html', subtitle: str = 'Capacity Expansion Analysis', regions_and_zones_mapping: Optional[DataFrame] = None) -> None
Generate a static website for navigating ISPyPSA plot results.
Creates a single HTML file with a navigation pane that mirrors the directory structure of the plots.
Examples:
Perform required imports.
>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs
>>> from ispypsa.results import extract_tabular_results
>>> from ispypsa.plotting import (
... create_plot_suite,
... save_plots,
... generate_results_website
... )
Extract results and create plots.
>>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))
>>> results = extract_tabular_results(network, ispypsa_tables)
>>> plots = create_plot_suite(results)
Save plots to disk first.
Generate the results website.
>>> generate_results_website(
... plots,
... plots_dir,
... output_dir=Path("outputs"),
... output_filename="capacity_expansion_results_viewer.html",
... subtitle="Capacity Expansion Analysis",
... regions_and_zones_mapping=results["regions_and_zones_mapping"]
... )
# Creates outputs/capacity_expansion_results_viewer.html
Parameters:
-
plots(Dict[Path, dict]) –Dictionary with Path keys (plot file paths) and dict values containing "plot" and "data" keys (output from create_*_plot_suite)
-
plots_dir(Path) –Directory where plots are saved (e.g., outputs/capacity_expansion_plots)
-
output_dir(Path) –Directory where the website HTML should be saved (e.g., outputs directory)
-
site_name(str, default:'ISPyPSA Results') –Name of the website (default: "ISPyPSA Results")
-
output_filename(str, default:'results_viewer.html') –Name of the output HTML file (default: "results_viewer.html")
-
subtitle(str, default:'Capacity Expansion Analysis') –Subtitle to display in the header (default: "Capacity Expansion Analysis")
-
regions_and_zones_mapping(Optional[DataFrame], default:None) –Optional mapping table to ensure correct capitalization of region and zone IDs
Returns:
-
None–None. The website is saved as output_filename in output_dir
Source code in src\ispypsa\plotting\website.py (lines 431–540)