ISPyPSA API

The ISPyPSA API is the set of Python functions used to carry out the ISPyPSA modelling workflow. When the CLI is used, the API runs in the background to carry out the workflow.

Advanced users wanting more control or flexibility over the modelling workflow may prefer to use ISPyPSA directly through the API. An example of how the default ISPyPSA workflow is implemented using the API is provided below. We suggest API users start by running and understanding the default workflow, then adapt it for their use case.

Each API function is also documented individually after the workflow example.

API default workflow

Below is an example of the default ISPyPSA workflow implemented using the Python API. This is the same workflow that the CLI follows. To use this workflow, edit the code to point at an ispypsa_config.yaml file of your choice, then run the Python script.

from pathlib import Path

from ispypsa.config import load_config
from ispypsa.data_fetch import read_csvs, write_csvs
from ispypsa.iasr_table_caching import build_local_cache
from ispypsa.logging import configure_logging
from ispypsa.plotting import (
    create_plot_suite,
    generate_results_website,
    save_plots,
)
from ispypsa.pypsa_build import (
    build_pypsa_network,
    save_pypsa_network,
    update_network_timeseries,
)
from ispypsa.results import (
    extract_regions_and_zones_mapping,
    extract_tabular_results,
)
from ispypsa.templater import (
    create_ispypsa_inputs_template,
    load_manually_extracted_tables,
)
from ispypsa.translator import (
    create_pypsa_friendly_inputs,
    create_pypsa_friendly_timeseries_inputs,
)

# Load model config.
config_path = Path("ispypsa_config.yaml")
config = load_config(config_path)

# Load base paths from config
parsed_workbook_cache = Path(config.paths.parsed_workbook_cache)
parsed_traces_directory = (
    Path(config.paths.parsed_traces_directory) / f"isp_{config.trace_data.dataset_year}"
)
workbook_path = Path(config.paths.workbook_path)
run_directory = Path(config.paths.run_directory)

# Construct full paths from base paths
ispypsa_input_tables_directory = (
    run_directory / config.paths.ispypsa_run_name / "ispypsa_inputs"
)
pypsa_friendly_inputs_location = (
    run_directory / config.paths.ispypsa_run_name / "pypsa_friendly"
)
capacity_expansion_timeseries_location = (
    pypsa_friendly_inputs_location / "capacity_expansion_timeseries"
)
operational_timeseries_location = (
    pypsa_friendly_inputs_location / "operational_timeseries"
)
pypsa_outputs_directory = run_directory / config.paths.ispypsa_run_name / "outputs"
capacity_expansion_tabular_results_directory = (
    pypsa_outputs_directory / "capacity_expansion_tables"
)
capacity_expansion_plot_results_directory = (
    pypsa_outputs_directory / "capacity_expansion_plots"
)
operational_tabular_results_directory = pypsa_outputs_directory / "operational_tables"
operational_plot_results_directory = pypsa_outputs_directory / "operational_plots"

# Create output directories if they don't exist
parsed_workbook_cache.mkdir(parents=True, exist_ok=True)
ispypsa_input_tables_directory.mkdir(parents=True, exist_ok=True)
pypsa_friendly_inputs_location.mkdir(parents=True, exist_ok=True)
capacity_expansion_timeseries_location.mkdir(parents=True, exist_ok=True)
operational_timeseries_location.mkdir(parents=True, exist_ok=True)
pypsa_outputs_directory.mkdir(parents=True, exist_ok=True)
capacity_expansion_tabular_results_directory.mkdir(parents=True, exist_ok=True)
capacity_expansion_plot_results_directory.mkdir(parents=True, exist_ok=True)
operational_tabular_results_directory.mkdir(parents=True, exist_ok=True)
operational_plot_results_directory.mkdir(parents=True, exist_ok=True)

configure_logging()

# Build the local cache from the workbook
build_local_cache(parsed_workbook_cache, workbook_path, config.iasr_workbook_version)

# Load ISP IASR data tables.
iasr_tables = read_csvs(parsed_workbook_cache)
manually_extracted_tables = load_manually_extracted_tables(config.iasr_workbook_version)

# Create ISPyPSA inputs from IASR tables.
ispypsa_tables = create_ispypsa_inputs_template(
    config.scenario,
    config.network.nodes.regional_granularity,
    iasr_tables,
    manually_extracted_tables,
    config.filter_by_nem_regions,
    config.filter_by_isp_sub_regions,
)
write_csvs(ispypsa_tables, ispypsa_input_tables_directory)

# Suggested stage of user interaction:
# At this stage of the workflow the user can modify ispypsa input files, either
# manually or programmatically, to run alternative scenarios using the template
# generated from the chosen ISP scenario.
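# For example, a hypothetical programmatic edit (the "ecaa_generators" table
# exists in the template, but the "fuel_type" column name and value below are
# illustrative and may not match your inputs):
#
# ecaa_generators = ispypsa_tables["ecaa_generators"]
# ispypsa_tables["ecaa_generators"] = ecaa_generators[
#     ecaa_generators["fuel_type"] != "Black Coal"
# ]
# write_csvs(ispypsa_tables, ispypsa_input_tables_directory)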

# Translate ISPyPSA format to a PyPSA friendly format.
pypsa_friendly_input_tables = create_pypsa_friendly_inputs(config, ispypsa_tables)

# Create timeseries inputs and snapshots
pypsa_friendly_input_tables["snapshots"] = create_pypsa_friendly_timeseries_inputs(
    config,
    "capacity_expansion",
    ispypsa_tables,
    pypsa_friendly_input_tables["generators"],
    parsed_traces_directory,
    capacity_expansion_timeseries_location,
)

write_csvs(pypsa_friendly_input_tables, pypsa_friendly_inputs_location)

# Build a PyPSA network object.
network = build_pypsa_network(
    pypsa_friendly_input_tables,
    capacity_expansion_timeseries_location,
)

# Solve for least cost operation/expansion
# Never use network.optimize() as this will remove custom constraints.
network.optimize.solve_model(solver_name=config.solver)

# Save capacity expansion results
save_pypsa_network(network, pypsa_outputs_directory, "capacity_expansion")
capacity_expansion_results = extract_tabular_results(network, ispypsa_tables)
capacity_expansion_results["regions_and_zones_mapping"] = (
    extract_regions_and_zones_mapping(ispypsa_tables)
)
write_csvs(capacity_expansion_results, capacity_expansion_tabular_results_directory)

# Create and save capacity expansion plots
capacity_expansion_plots = create_plot_suite(capacity_expansion_results)
save_plots(capacity_expansion_plots, capacity_expansion_plot_results_directory)

# Generate capacity expansion results website
generate_results_website(
    capacity_expansion_plots,
    capacity_expansion_plot_results_directory,
    pypsa_outputs_directory,
    site_name=f"{config.paths.ispypsa_run_name}",
    output_filename="capacity_expansion_results_viewer.html",
    subtitle="Capacity Expansion Analysis",
    regions_and_zones_mapping=capacity_expansion_results["regions_and_zones_mapping"],
)

# Operational modelling extension
operational_snapshots = create_pypsa_friendly_timeseries_inputs(
    config,
    "operational",
    ispypsa_tables,
    pypsa_friendly_input_tables["generators"],
    parsed_traces_directory,
    operational_timeseries_location,
)

write_csvs(
    {"operational_snapshots": operational_snapshots}, pypsa_friendly_inputs_location
)

update_network_timeseries(
    network,
    pypsa_friendly_input_tables,
    operational_snapshots,
    operational_timeseries_location,
)

network.optimize.fix_optimal_capacities()

# Never use network.optimize() as this will remove custom constraints.
network.optimize.optimize_with_rolling_horizon(
    horizon=config.temporal.operational.horizon,
    overlap=config.temporal.operational.overlap,
)

save_pypsa_network(network, pypsa_outputs_directory, "operational")

# Extract and save operational results
operational_results = extract_tabular_results(network, ispypsa_tables)
operational_results["regions_and_zones_mapping"] = extract_regions_and_zones_mapping(
    ispypsa_tables
)
write_csvs(operational_results, operational_tabular_results_directory)

# Create and save operational plots
operational_plots = create_plot_suite(operational_results)
save_plots(operational_plots, operational_plot_results_directory)

# Generate operational results website
generate_results_website(
    operational_plots,
    operational_plot_results_directory,
    pypsa_outputs_directory,
    site_name=f"{config.paths.ispypsa_run_name}",
    output_filename="operational_results_viewer.html",
    subtitle="Operational Analysis",
    regions_and_zones_mapping=operational_results["regions_and_zones_mapping"],
)
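
Saved networks can later be reloaded for further analysis without rerunning the optimisation. A minimal sketch using PyPSA directly, assuming save_pypsa_network wrote the capacity expansion network as a NetCDF file (the actual file name and location are determined by save_pypsa_network and your run directory, so the path below is hypothetical):

import pypsa

# Hypothetical path: adjust to wherever save_pypsa_network wrote the network.
network = pypsa.Network("runs/my_run/outputs/capacity_expansion.nc")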

Configuration & Logging

ispypsa.config.load_config

load_config(config_path: str | Path) -> ModelConfig

Load and validate configuration from a YAML file.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.config import load_config

Load the configuration from a YAML file.

>>> config = load_config(Path("ispypsa_config.yaml"))

Access configuration values.

>>> config.scenario
'Step Change'
>>> config.network.nodes.regional_granularity
'sub_regions'
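
Handle a missing configuration file (a sketch using the documented FileNotFoundError).

>>> try:
...     config = load_config(Path("missing_config.yaml"))
... except FileNotFoundError:
...     print("Config file not found")
Config file not found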

Parameters:

  • config_path (str | Path) –

    Path to the YAML configuration file

Returns:

  • ModelConfig ( ModelConfig ) –

    Validated configuration object

Raises:

  • ValidationError

    If the configuration is invalid

  • FileNotFoundError

    If the config file doesn't exist

  • YAMLError

    If the YAML is malformed

Source code in src\ispypsa\config\loader.py
def load_config(config_path: str | Path) -> ModelConfig:
    """
    Load and validate configuration from a YAML file.

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.config import load_config

        Load the configuration from a YAML file.
        >>> config = load_config(Path("ispypsa_config.yaml"))

        Access configuration values.
        >>> config.scenario
        'Step Change'
        >>> config.network.nodes.regional_granularity
        'sub_regions'

    Args:
        config_path: Path to the YAML configuration file

    Returns:
        ModelConfig: Validated configuration object

    Raises:
        ValidationError: If the configuration is invalid
        FileNotFoundError: If the config file doesn't exist
        yaml.YAMLError: If the YAML is malformed
    """
    with open(config_path) as f:
        config_dict = yaml.safe_load(f)

    return ModelConfig(**config_dict)

ispypsa.logging.configure_logging

configure_logging(console: bool = True, console_level: int = logging.WARNING, file: bool = True, file_level: int = logging.INFO, log_file: str = 'ISPyPSA.log') -> None

Configures ISPyPSA logging

Examples:

Perform required imports.

>>> import logging
>>> from ispypsa.logging import configure_logging

Configure logging with default settings (console warnings, file info).

>>> configure_logging()

Configure logging with custom settings.

>>> configure_logging(
...     console=True,
...     console_level=logging.INFO,
...     file=True,
...     file_level=logging.DEBUG,
...     log_file="my_run.log"
... )

Disable file logging.

>>> configure_logging(file=False)
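
Once configured, messages logged via the standard logging module are routed through these handlers; under the default settings an info message goes to the log file but not the console.

>>> logging.getLogger("ispypsa").info("Starting ISPyPSA run")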

Parameters:

  • console (bool, default: True ) –

    Whether to log to the console. Defaults to True.

  • console_level (int, default: WARNING ) –

    Level of the console logging. Defaults to logging.WARNING.

  • file (bool, default: True ) –

    Whether to log to a log file. Defaults to True.

  • file_level (int, default: INFO ) –

    Level of the file logging. Defaults to logging.INFO.

  • log_file (str, default: 'ISPyPSA.log' ) –

    Name of the logging file. Defaults to "ISPyPSA.log".

Returns:

  • None

    None

Source code in src\ispypsa\logging.py
def configure_logging(
    console: bool = True,
    console_level: int = logging.WARNING,
    file: bool = True,
    file_level: int = logging.INFO,
    log_file: str = "ISPyPSA.log",
) -> None:
    """Configures ISPyPSA logging

    Examples:
        Perform required imports.
        >>> import logging
        >>> from ispypsa.logging import configure_logging

        Configure logging with default settings (console warnings, file info).
        >>> configure_logging()

        Configure logging with custom settings.
        >>> configure_logging(
        ...     console=True,
        ...     console_level=logging.INFO,
        ...     file=True,
        ...     file_level=logging.DEBUG,
        ...     log_file="my_run.log"
        ... )

        Disable file logging.
        >>> configure_logging(file=False)

    Args:
        console: Whether to log to the console. Defaults to True.
        console_level: Level of the console logging. Defaults to logging.WARNING.
        file: Whether to log to a log file. Defaults to True.
        file_level: Level of the file logging. Defaults to logging.INFO.
        log_file: Name of the logging file. Defaults to "ISPyPSA.log".

    Returns:
        None
    """
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    handlers = []
    if console:
        console_handler = logging.StreamHandler(stream=sys.stdout)
        console_handler.setLevel(console_level)
        console_formatter = logging.Formatter("%(levelname)s: %(message)s")
        console_handler.setFormatter(console_formatter)
        handlers.append(console_handler)
    if file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(file_level)
        file_formatter = logging.Formatter("%(asctime)s - %(levelname)s: %(message)s")
        file_handler.setFormatter(file_formatter)
        handlers.append(file_handler)
    if not handlers:
        handlers.append(logging.NullHandler())
    logging.basicConfig(
        level=logging.INFO,
        format="[%(asctime)s] %(levelname)s: %(message)s",
        handlers=handlers,
    )
    configure_dependency_logger("pypsa", logging.INFO)

Data Fetching & Caching

ispypsa.iasr_table_caching.build_local_cache

build_local_cache(cache_path: Path | str, workbook_path: Path | str, iasr_workbook_version: str) -> None

Uses isp-workbook-parser to build a local cache of parsed workbook CSVs

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.iasr_table_caching import build_local_cache

Build the local cache of parsed workbook CSVs.

>>> build_local_cache(
...     cache_path=Path("parsed_workbook_cache"),
...     workbook_path=Path("path/to/ISP_Workbook.xlsx"),
...     iasr_workbook_version="6.0"
... )
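
If a workbook is not already available locally, one can be downloaded first (see ispypsa.data_fetch.fetch_workbook below).

>>> from ispypsa.data_fetch import fetch_workbook
>>> fetch_workbook("6.0", "path/to/ISP_Workbook.xlsx")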

Parameters:

  • cache_path (Path | str) –

    Path that should be created for the local cache

  • workbook_path (Path | str) –

    Path to an ISP Assumptions Workbook that is supported by isp-workbook-parser

  • iasr_workbook_version (str) –

    str specifying the version of the workbook being used.

Returns:

  • None

    None

Source code in src\ispypsa\iasr_table_caching\local_cache.py
def build_local_cache(
    cache_path: Path | str, workbook_path: Path | str, iasr_workbook_version: str
) -> None:
    """Uses `isp-workbook-parser` to build a local cache of parsed workbook CSVs

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.iasr_table_caching import build_local_cache

        Build the local cache of parsed workbook CSVs.
        >>> build_local_cache(
        ...     cache_path=Path("parsed_workbook_cache"),
        ...     workbook_path=Path("path/to/ISP_Workbook.xlsx"),
        ...     iasr_workbook_version="6.0"
        ... )

    Args:
        cache_path: Path that should be created for the local cache
        workbook_path: Path to an ISP Assumptions Workbook that is supported by
            `isp-workbook-parser`
        iasr_workbook_version: str specifying the version of the workbook being used.

    Returns:
        None
    """
    workbook = Parser(Path(workbook_path))
    if workbook.workbook_version != iasr_workbook_version:
        raise ValueError(
            "The IASR workbook provided does not match the version "
            "specified in the config."
        )
    tables_to_get = REQUIRED_TABLES
    workbook.save_tables(cache_path, tables=tables_to_get)
    return None

ispypsa.data_fetch.read_csvs

read_csvs(directory: Path | str) -> dict[str, pd.DataFrame]

Read all the CSVs in a directory into a dictionary with filenames (without csv extension) as keys.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs

Read CSVs from a directory containing parsed workbook tables.

>>> iasr_tables = read_csvs(Path("parsed_workbook_cache"))

Read CSVs from a directory containing ISPyPSA input tables.

>>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))

Access a specific table from the dictionary.

>>> generators = iasr_tables["existing_generators_summary"]
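
List the names of all tables that were read.

>>> table_names = sorted(iasr_tables)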

Parameters:

  • directory (Path | str) –

    Path to directory to read CSVs from.

Returns:

  • dict[str, pd.DataFrame] –

    Dictionary with filenames (without .csv extension) as keys and DataFrames as values.

Source code in src\ispypsa\data_fetch\csv_read_write.py
def read_csvs(directory: Path | str) -> dict[str, pd.DataFrame]:
    """Read all the CSVs in a directory into a dictionary with filenames (without csv
    extension) as keys.

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.data_fetch import read_csvs

        Read CSVs from a directory containing parsed workbook tables.
        >>> iasr_tables = read_csvs(Path("parsed_workbook_cache"))

        Read CSVs from a directory containing ISPyPSA input tables.
        >>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))

        Access a specific table from the dictionary.
        >>> generators = iasr_tables["existing_generators_summary"]

    Args:
        directory: Path to directory to read CSVs from.

    Returns:
        dict[str, pd.DataFrame]: Dictionary with filenames (without .csv extension)
        as keys and DataFrames as values.
    """
    files = Path(directory).glob("*.csv")
    return {file.name[:-4]: pd.read_csv(file) for file in files}

ispypsa.data_fetch.write_csvs

write_csvs(data_dict: dict[str, pd.DataFrame], directory: Path | str)

Write all pd.DataFrames in a dictionary with filenames as keys (without csv extension) to CSVs.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.data_fetch import write_csvs

Write ISPyPSA input tables to a directory.

>>> write_csvs(ispypsa_tables, Path("ispypsa_inputs"))

Write PyPSA-friendly tables to a directory.

>>> write_csvs(pypsa_friendly_tables, Path("pypsa_friendly_inputs"))

Write model results to a directory.

>>> write_csvs(results, Path("outputs/results"))
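
Tables are written with index=False, so a dictionary of tables round-trips cleanly through read_csvs.

>>> from ispypsa.data_fetch import read_csvs
>>> results_again = read_csvs(Path("outputs/results"))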

Parameters:

  • data_dict (dict[str, pd.DataFrame]) –

    Dictionary of pd.DataFrames to write to csv files.

  • directory (Path | str) –

    Path to directory to save CSVs to.

Returns:

  • None

Source code in src\ispypsa\data_fetch\csv_read_write.py
def write_csvs(data_dict: dict[str, pd.DataFrame], directory: Path | str):
    """Write all pd.DataFrames in a dictionary with filenames as keys (without csv extension)
    to CSVs.

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.data_fetch import write_csvs

        Write ISPyPSA input tables to a directory.
        >>> write_csvs(ispypsa_tables, Path("ispypsa_inputs"))

        Write PyPSA-friendly tables to a directory.
        >>> write_csvs(pypsa_friendly_tables, Path("pypsa_friendly_inputs"))

        Write model results to a directory.
        >>> write_csvs(results, Path("outputs/results"))

    Args:
        data_dict: Dictionary of pd.DataFrames to write to csv files.
        directory: Path to directory to save CSVs to.

    Returns:
        None
    """
    for file_name, data in data_dict.items():
        save_path = Path(directory) / Path(f"{file_name}.csv")
        save_path.parent.mkdir(parents=True, exist_ok=True)
        # set index=False to avoid adding "Unnamed" cols if/when reading from these csvs later
        data.to_csv(save_path, index=False)

ispypsa.data_fetch.fetch_workbook

fetch_workbook(workbook_version: str, save_path: Path | str) -> None

Download ISP workbook file.

Downloads the ISP workbook for the specified version from the manifest to the specified file path.

Examples:

Perform required imports.

>>> from ispypsa.data_fetch import fetch_workbook

Download the ISP 6.0 workbook to data/workbooks/isp_6.0.xlsx.

>>> fetch_workbook("6.0", "data/workbooks/isp_6.0.xlsx")

Parameters:

  • workbook_version (str) –

    Version string (e.g., "6.0")

  • save_path (Path | str) –

    Full path where the workbook file should be saved (e.g., "data/workbooks/6.0.xlsx")

Returns:

  • None

    None

Raises:

  • FileNotFoundError

    If the manifest file does not exist

  • HTTPError

    If the download fails

Source code in src\ispypsa\data_fetch\download.py
def fetch_workbook(
    workbook_version: str,
    save_path: Path | str,
) -> None:
    """Download ISP workbook file.

    Downloads the ISP workbook for the specified version from the manifest
    to the specified file path.

    Examples:
        >>> fetch_workbook("6.0", "data/workbooks/isp_6.0.xlsx")
        # Downloads ISP 6.0 workbook to data/workbooks/isp_6.0.xlsx

    Args:
        workbook_version : str
            Version string (e.g., "6.0")
        save_path : Path | str
            Full path where the workbook file should be saved
            (e.g., "data/workbooks/6.0.xlsx")

    Returns:
        None

    Raises:
        FileNotFoundError: If the manifest file does not exist
        requests.HTTPError: If the download fails
    """
    # Construct manifest path
    manifest_path = (
        Path(__file__).parent / "manifests" / "workbooks" / f"{workbook_version}.txt"
    )

    if not manifest_path.exists():
        raise FileNotFoundError(f"Manifest file not found: {manifest_path}")

    # Read URL from manifest (should be single URL)
    with open(manifest_path) as f:
        urls = [line.strip() for line in f if line.strip()]

    url = urls[0]
    save_path = Path(save_path)

    # Create parent directories
    save_path.parent.mkdir(parents=True, exist_ok=True)

    # Download file
    response = requests.get(url, stream=True)
    response.raise_for_status()

    # Get file size if available
    total_size = int(response.headers.get("content-length", 0))

    # Write file with progress bar
    with (
        open(save_path, "wb") as f,
        tqdm(
            total=total_size,
            unit="B",
            unit_scale=True,
            unit_divisor=1024,
            desc=f"Downloading {save_path.name}",
        ) as pbar,
    ):
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
            pbar.update(len(chunk))

Templating (ISPyPSA Input Creation)

ispypsa.templater.create_ispypsa_inputs_template

create_ispypsa_inputs_template(scenario: str, regional_granularity: str, iasr_tables: dict[str, pd.DataFrame], manually_extracted_tables: dict[str, pd.DataFrame], filter_to_nem_regions: list[str] = None, filter_to_isp_sub_regions: list[str] = None) -> dict[str, pd.DataFrame]

Creates a template set of ISPyPSA input tables.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.config import load_config
>>> from ispypsa.data_fetch import read_csvs, write_csvs
>>> from ispypsa.templater import load_manually_extracted_tables
>>> from ispypsa.templater import create_ispypsa_inputs_template

Tables previously extracted from IASR workbook using isp_workbook_parser are loaded.

>>> iasr_tables = read_csvs(Path("iasr_directory"))

Some tables can't be handled by isp_workbook_parser so ISPyPSA ships with the missing data.

>>> manually_extracted_tables = load_manually_extracted_tables("6.0")

Now a template can be created by specifying the ISP scenario to use and the spatial granularity of the model.

>>> ispypsa_inputs_template = create_ispypsa_inputs_template(
... scenario="Step Change",
... regional_granularity="sub_regions",
... iasr_tables=iasr_tables,
... manually_extracted_tables=manually_extracted_tables
... )

Write the template tables to a directory as CSVs.

>>> write_csvs(ispypsa_inputs_template, Path("ispypsa_inputs"))
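
Optionally, the template can be filtered to a subset of regions using the filter arguments documented below.

>>> ispypsa_inputs_template = create_ispypsa_inputs_template(
... scenario="Step Change",
... regional_granularity="sub_regions",
... iasr_tables=iasr_tables,
... manually_extracted_tables=manually_extracted_tables,
... filter_to_isp_sub_regions=["CNSW", "VIC", "TAS"]
... )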

Parameters:

  • scenario (str) –

    ISP scenario to generate template inputs based on.

  • regional_granularity (str) –

    the spatial granularity of the model template, "sub_regions", "nem_regions", or "single_region".

  • iasr_tables (dict[str, pd.DataFrame]) –

    dictionary of dataframes providing the IASR input tables extracted using the isp_workbook_parser.

  • manually_extracted_tables (dict[str, pd.DataFrame]) –

    dictionary of dataframes providing additional IASR tables that can't be parsed using isp_workbook_parser

  • filter_to_nem_regions (list[str], default: None ) –

    Optional list of NEM region IDs (e.g., ['NSW', 'VIC']) to filter the template to. Cannot be specified together with filter_to_isp_sub_regions.

  • filter_to_isp_sub_regions (list[str], default: None ) –

    Optional list of ISP sub-region IDs (e.g., ['CNSW', 'VIC', 'TAS']) to filter the template to. Cannot be specified together with filter_to_nem_regions.

Returns:

  • dict[str, pd.DataFrame] –

    dictionary of dataframes in the ISPyPSA format

Raises:

  • ValueError

    If both filter_to_nem_regions and filter_to_isp_sub_regions are provided

Source code in src\ispypsa\templater\create_template.py
def create_ispypsa_inputs_template(
    scenario: str,
    regional_granularity: str,
    iasr_tables: dict[str, pd.DataFrame],
    manually_extracted_tables: dict[str, pd.DataFrame],
    filter_to_nem_regions: list[str] = None,
    filter_to_isp_sub_regions: list[str] = None,
) -> dict[str, pd.DataFrame]:
    """Creates a template set of [`ISPyPSA` input tables](tables/ispypsa.md).

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.config import load_config
        >>> from ispypsa.data_fetch import read_csvs, write_csvs
        >>> from ispypsa.templater import load_manually_extracted_tables
        >>> from ispypsa.templater import create_ispypsa_inputs_template

        Tables previously extracted from IASR workbook using isp_workbook_parser are
        loaded.
        >>> iasr_tables = read_csvs(Path("iasr_directory"))

        Some tables can't be handled by isp_workbook_parser so ISPyPSA ships with the
        missing data.
        >>> manually_extracted_tables = load_manually_extracted_tables("6.0")

        Now a template can be created by specifying the ISP scenario to use and the
        spatial granularity of the model.
        >>> ispypsa_inputs_template = create_ispypsa_inputs_template(
        ... scenario="Step Change",
        ... regional_granularity="sub_regions",
        ... iasr_tables=iasr_tables,
        ... manually_extracted_tables=manually_extracted_tables
        ... )

        Write the template tables to a directory as CSVs.
        >>> write_csvs(ispypsa_inputs_template, Path("ispypsa_inputs"))

    Args:
        scenario: ISP scenario to generate template inputs based on.
        regional_granularity: the spatial granularity of the model template,
            "sub_regions", "nem_regions", or "single_region".
        iasr_tables: dictionary of dataframes providing the IASR input tables
            extracted using the `isp_workbook_parser`.
        manually_extracted_tables: dictionary of dataframes providing additional
            IASR tables that can't be parsed using `isp_workbook_parser`
        filter_to_nem_regions: Optional list of NEM region IDs (e.g., ['NSW', 'VIC'])
            to filter the template to. Cannot be specified together with
            filter_to_isp_sub_regions.
        filter_to_isp_sub_regions: Optional list of ISP sub-region IDs
            (e.g., ['CNSW', 'VIC', 'TAS']) to filter the template to. Cannot be
            specified together with filter_to_nem_regions.

    Returns:
        dictionary of dataframes in the [`ISPyPSA` format](tables/ispypsa.md)

    Raises:
        ValueError: If both filter_to_nem_regions and filter_to_isp_sub_regions are provided
    """
    # Validate filtering parameters
    if filter_to_nem_regions is not None and filter_to_isp_sub_regions is not None:
        raise ValueError(
            "Cannot specify both filter_to_nem_regions and filter_to_isp_sub_regions"
        )

    template = {}

    template.update(manually_extracted_tables)

    if regional_granularity == "sub_regions":
        template["sub_regions"] = _template_sub_regions(
            iasr_tables["sub_regional_reference_nodes"], mapping_only=False
        )

        template["flow_paths"] = _template_sub_regional_flow_paths(
            iasr_tables["flow_path_transfer_capability"]
        )

        template["flow_path_expansion_costs"] = _template_sub_regional_flow_path_costs(
            iasr_tables,
            scenario,
        )

    elif regional_granularity == "nem_regions":
        template["sub_regions"] = _template_sub_regions(
            iasr_tables["sub_regional_reference_nodes"], mapping_only=True
        )

        template["nem_regions"] = _template_regions(
            iasr_tables["regional_reference_nodes"]
        )

        template["flow_paths"] = _template_regional_interconnectors(
            iasr_tables["interconnector_transfer_capability"]
        )

    else:
        template["sub_regions"] = _template_sub_regions(
            iasr_tables["sub_regional_reference_nodes"], mapping_only=True
        )

    template["renewable_energy_zones"] = _template_rez_build_limits(
        iasr_tables["initial_build_limits"], scenario
    )

    possible_rez_or_constraint_names = list(
        set(
            list(template["renewable_energy_zones"]["rez_id"])
            + list(template["custom_constraints_rhs"]["constraint_id"])
        )
    )

    template["rez_transmission_expansion_costs"] = _template_rez_transmission_costs(
        iasr_tables,
        scenario,
        possible_rez_or_constraint_names,
    )

    template["ecaa_generators"] = _template_ecaa_generators_static_properties(
        iasr_tables
    )

    template["new_entrant_generators"] = _template_new_generators_static_properties(
        iasr_tables
    )

    ecaa_batteries, new_entrant_batteries = _template_battery_properties(iasr_tables)
    template["ecaa_batteries"] = ecaa_batteries
    template["new_entrant_batteries"] = new_entrant_batteries

    dynamic_generator_property_templates = _template_generator_dynamic_properties(
        iasr_tables, scenario
    )

    template.update(dynamic_generator_property_templates)

    energy_policy_targets = _template_energy_policy_targets(iasr_tables, scenario)

    template.update(energy_policy_targets)

    # Apply regional filtering if requested
    if filter_to_nem_regions or filter_to_isp_sub_regions:
        template = _filter_template(
            template,
            nem_regions=filter_to_nem_regions,
            isp_sub_regions=filter_to_isp_sub_regions,
        )

    return template

ispypsa.templater.load_manually_extracted_tables

load_manually_extracted_tables(iasr_workbook_version: str) -> dict[str, pd.DataFrame]

Retrieves the manually extracted template files for the IASR workbook version.

Some tables can't be handled by isp-workbook-parser so ISPyPSA ships with the missing data pre-extracted for each supported workbook version.

Examples:

Perform required imports.

>>> from ispypsa.templater import load_manually_extracted_tables

Load the manually extracted tables for the workbook version.

>>> manually_extracted_tables = load_manually_extracted_tables("6.0")

Access a specific table from the dictionary.

>>> custom_constraints_rhs = manually_extracted_tables["custom_constraints_rhs"]

Parameters:

  • iasr_workbook_version (str) –

    str specifying which version of the workbook is being used to create the template.

Returns:

  • dict[str, pd.DataFrame] –

    Dictionary with table names as keys and DataFrames as values.

Source code in src\ispypsa\templater\manual_tables.py
def load_manually_extracted_tables(
    iasr_workbook_version: str,
) -> dict[str, pd.DataFrame]:
    """Retrieves the manually extracted template files for the IASR workbook version.

    Some tables can't be handled by `isp-workbook-parser` so ISPyPSA ships with the
    missing data pre-extracted for each supported workbook version.

    Examples:
        Perform required imports.
        >>> from ispypsa.templater import load_manually_extracted_tables

        Load the manually extracted tables for the workbook version.
        >>> manually_extracted_tables = load_manually_extracted_tables("6.0")

        Access a specific table from the dictionary.
        >>> custom_constraints_rhs = manually_extracted_tables["custom_constraints_rhs"]

    Args:
        iasr_workbook_version: str specifying which version of the workbook is being
            used to create the template.

    Returns:
        dict[str, `pd.DataFrame`]
    """
    path_to_tables = (
        Path(__file__).parent
        / Path("manually_extracted_template_tables")
        / Path(iasr_workbook_version)
    )
    csv_files = path_to_tables.glob("*.csv")
    df_files = {}
    for file in csv_files:
        df_files[file.name.replace(".csv", "")] = pd.read_csv(file)
    return df_files

Translation (PyPSA-Friendly Format)

ispypsa.translator.create_pypsa_friendly_inputs

create_pypsa_friendly_inputs(config: ModelConfig, ispypsa_tables: dict[str, DataFrame]) -> dict[str, pd.DataFrame]

Creates a set of tables for defining a PyPSA network from a set of ISPyPSA tables.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.config import load_config
>>> from ispypsa.data_fetch import read_csvs, write_csvs
>>> from ispypsa.translator import create_pypsa_friendly_inputs

Load ISPyPSA model config file and input tables.

>>> config = load_config(Path("ispypsa_config.yaml"))
>>> ispypsa_input_tables = read_csvs(Path("ispypsa_inputs_directory"))

Make the PyPSA friendly inputs!

>>> pypsa_friendly_inputs = create_pypsa_friendly_inputs(
... config=config,
... ispypsa_tables=ispypsa_input_tables
... )

Write the resulting dataframes to CSVs.

>>> write_csvs(pypsa_friendly_inputs, Path("pypsa_friendly_inputs"))

Parameters:

  • config (ModelConfig) –

    ISPyPSA ispypsa.config.ModelConfig object (add link to config docs).

  • ispypsa_tables (dict[str, DataFrame]) –

    dictionary of dataframes providing the ISPyPSA input tables. (add link to ispypsa input tables docs).

Returns:

  • dict[str, pd.DataFrame] –

    dictionary of dataframes in the PyPSA friendly format. (add link to pypsa friendly format table docs)

Source code in src\ispypsa\translator\create_pypsa_friendly.py
def create_pypsa_friendly_inputs(
    config: ModelConfig, ispypsa_tables: dict[str, pd.DataFrame]
) -> dict[str, pd.DataFrame]:
    """Creates a set of tables for defining a `PyPSA` network from a set `ISPyPSA` tables.

    Examples:
        Perform requried imports.
        >>> from pathlib import Path
        >>> from ispypsa.config import load_config
        >>> from ispypsa.data_fetch import read_csvs, write_csvs
        >>> from ispypsa.translator import create_pypsa_friendly_inputs

        Load ISPyPSA model config file and input tables.
        >>> config = load_config(Path("ispypsa_config.yaml"))
        >>> ispypsa_input_tables = read_csvs(Path("ispypsa_inputs_directory"))

        Make the PyPSA friendly inputs!
        >>> pypsa_friendly_inputs = create_pypsa_friendly_inputs(
        ... config=config,
        ... ispypsa_tables=ispypsa_input_tables
        ... )

        Write the resulting dataframes to CSVs.
        >>> write_csvs(pypsa_friendly_inputs, Path("pypsa_friendly_inputs"))

    Args:
        config: `ISPyPSA` `ispypsa.config.ModelConfig` object (add link to config docs).
        ispypsa_tables: dictionary of dataframes providing the `ISPyPSA` input tables.
            (add link to ispypsa input tables docs).

    Returns: dictionary of dataframes in the `PyPSA` friendly format. (add link to
        pypsa friendly format table docs)
    """
    pypsa_inputs = {}

    pypsa_inputs["investment_period_weights"] = _create_investment_period_weightings(
        config.temporal.capacity_expansion.investment_periods,
        config.temporal.range.end_year,
        config.discount_rate,
    )

    translated_generators = []
    translated_ecaa_generators = _translate_ecaa_generators(
        ispypsa_tables,
        config.temporal.capacity_expansion.investment_periods,
        config.network.nodes.regional_granularity,
        config.network.nodes.rezs,
        config.temporal.year_type,
    )
    _append_if_not_empty(translated_generators, translated_ecaa_generators)

    translated_new_entrant_generators = _translate_new_entrant_generators(
        ispypsa_tables,
        config.temporal.capacity_expansion.investment_periods,
        config.discount_rate,
        config.network.nodes.regional_granularity,
        config.network.nodes.rezs,
    )
    _append_if_not_empty(translated_generators, translated_new_entrant_generators)

    if len(translated_generators) > 0:
        pypsa_inputs["generators"] = pd.concat(
            translated_generators,
            axis=0,
            ignore_index=True,
        )
    else:
        # TODO: Log, improve error message (/ is this the right place for the error?)
        raise ValueError("No generator data returned from translator.")

    batteries = []
    translated_ecaa_batteries = _translate_ecaa_batteries(
        ispypsa_tables,
        config.temporal.capacity_expansion.investment_periods,
        config.network.nodes.regional_granularity,
        config.network.nodes.rezs,
        config.temporal.year_type,
    )
    _append_if_not_empty(batteries, translated_ecaa_batteries)

    translated_new_entrant_batteries = _translate_new_entrant_batteries(
        ispypsa_tables,
        config.temporal.capacity_expansion.investment_periods,
        config.discount_rate,
        config.network.nodes.regional_granularity,
        config.network.nodes.rezs,
    )
    _append_if_not_empty(batteries, translated_new_entrant_batteries)

    if len(batteries) > 0:
        pypsa_inputs["batteries"] = pd.concat(
            batteries,
            axis=0,
            ignore_index=True,
        )
    else:
        logging.warning(
            "No battery data returned from translator - no batteries added to model."
        )
        # raise an error? Improve the message for sure

    buses = []
    links = []

    if config.network.nodes.regional_granularity == "sub_regions":
        buses.append(_translate_isp_sub_regions_to_buses(ispypsa_tables["sub_regions"]))
    elif config.network.nodes.regional_granularity == "nem_regions":
        buses.append(_translate_nem_regions_to_buses(ispypsa_tables["nem_regions"]))
    elif config.network.nodes.regional_granularity == "single_region":
        buses.append(_create_single_region_bus())

    if config.unserved_energy.cost is not None:
        unserved_energy_generators = _create_unserved_energy_generators(
            buses[0],  # create generators for just demand buses not rez buses too.
            config.unserved_energy.cost,
            config.unserved_energy.max_per_node,
        )
        pypsa_inputs["generators"] = pd.concat(
            [pypsa_inputs["generators"], unserved_energy_generators], ignore_index=True
        )

    if config.network.nodes.rezs == "discrete_nodes":
        buses.append(_translate_rezs_to_buses(ispypsa_tables["renewable_energy_zones"]))
        links.append(
            _translate_renewable_energy_zone_build_limits_to_links(
                ispypsa_tables["renewable_energy_zones"],
                ispypsa_tables["rez_transmission_expansion_costs"],
                config,
            )
        )

    if config.network.nodes.regional_granularity != "single_region":
        links.append(_translate_flow_paths_to_links(ispypsa_tables, config))

    pypsa_inputs["buses"] = pd.concat(buses)

    if len(links) > 0:
        pypsa_inputs["links"] = pd.concat(links)
    else:
        pypsa_inputs["links"] = pd.DataFrame()

    pypsa_inputs.update(
        _translate_custom_constraints(
            config, ispypsa_tables, pypsa_inputs["links"], pypsa_inputs["generators"]
        )
    )

    return pypsa_inputs

ispypsa.translator.create_pypsa_friendly_snapshots

create_pypsa_friendly_snapshots(config: ModelConfig, model_phase: Literal['capacity_expansion', 'operational'], existing_generators: DataFrame | None = None, demand_traces: dict[str, DataFrame] | None = None, generator_traces: dict[str, DataFrame] | None = None) -> pd.DataFrame

Create a pd.DataFrame of PyPSA-compatible snapshots and associated investment periods for either the 'capacity_expansion' or 'operational' model phase.

This function also includes investment_periods for the 'operational' phase (even though they are not strictly required), because PyPSA expects both 'snapshots' and 'investment_periods' when overwriting an existing network.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.config import load_config
>>> from ispypsa.translator import create_pypsa_friendly_snapshots

Load the ISPyPSA configuration.

>>> config = load_config(Path("ispypsa_config.yaml"))

Create snapshots for capacity expansion modelling.

>>> snapshots = create_pypsa_friendly_snapshots(config, "capacity_expansion")

Create snapshots for operational modelling.

>>> snapshots = create_pypsa_friendly_snapshots(config, "operational")
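
The returned snapshots can be reused when building timeseries inputs, via the snapshots argument of create_pypsa_friendly_timeseries_inputs (a sketch, assuming the ISPyPSA tables and PyPSA-friendly generators table have been loaded as in the workflow example above, and that the trace and output paths shown are placeholders).

>>> from ispypsa.translator import create_pypsa_friendly_timeseries_inputs
>>> snapshots = create_pypsa_friendly_timeseries_inputs(
...     config,
...     "operational",
...     ispypsa_tables,
...     pypsa_friendly_inputs["generators"],
...     Path("parsed_traces"),
...     Path("pypsa_friendly/operational_timeseries"),
...     snapshots=snapshots
... )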

Parameters:

  • config (ModelConfig) –

    ispypsa.ModelConfig instance

  • model_phase (Literal['capacity_expansion', 'operational']) –

    string defining whether the snapshots are for the operational or capacity expansion phase of the modelling. This allows the correct temporal config inputs to be used from the ModelConfig instance.

  • existing_generators (DataFrame | None, default: None ) –

    pd.DataFrame containing existing generators data, required if using named_representative_weeks with residual metrics (e.g., "residual-peak-demand", "residual-minimum-demand") (optional).

  • demand_traces (dict[str, DataFrame] | None, default: None ) –

    dict[str, pd.DataFrame] with demand node names as keys and time series dataframes as values (optional, required for named_representative_weeks).

  • generator_traces (dict[str, DataFrame] | None, default: None ) –

    dict[str, pd.DataFrame] with generator names as keys and time series dataframes as values (optional, required for residual metrics).

Returns:

  • DataFrame –

    A pd.DataFrame containing the columns 'investment_periods' (int), defining the investment period a modelled interval belongs to, and 'snapshots' (datetime), defining each time interval modelled. 'investment_periods' are referred to by the year (financial or calendar) in which they begin.

Source code in src\ispypsa\translator\snapshots.py
def create_pypsa_friendly_snapshots(
    config: ModelConfig,
    model_phase: Literal["capacity_expansion", "operational"],
    existing_generators: pd.DataFrame | None = None,
    demand_traces: dict[str, pd.DataFrame] | None = None,
    generator_traces: dict[str, pd.DataFrame] | None = None,
) -> pd.DataFrame:
    """
    Create a pd.DataFrame of PyPSA-compatible snapshots and associated investment
    periods for either the 'capacity_expansion' or 'operational' model phase.

    This function also includes investment_periods for the 'operational' phase
    (even though they are not strictly required), because PyPSA expects both 'snapshots'
    and 'investment_periods' when overwriting an existing network.

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.config import load_config
        >>> from ispypsa.translator import create_pypsa_friendly_snapshots

        Load the ISPyPSA configuration.
        >>> config = load_config(Path("ispypsa_config.yaml"))

        Create snapshots for capacity expansion modelling.
        >>> snapshots = create_pypsa_friendly_snapshots(config, "capacity_expansion")

        Create snapshots for operational modelling.
        >>> snapshots = create_pypsa_friendly_snapshots(config, "operational")

    Args:
        config: ispypsa.ModelConfig instance
        model_phase: string defining whether the snapshots are for the operational or
            capacity expansion phase of the modelling. This allows the correct temporal
            config inputs to be used from the ModelConfig instance.
        existing_generators: pd.DataFrame containing existing generators data, required
            if using named_representative_weeks with residual metrics (e.g.,
            "residual-peak-demand", "residual-minimum-demand") (optional).
        demand_traces: dict[str, pd.DataFrame] with demand node names as keys and
            time series dataframes as values (optional, required for named_representative_weeks).
        generator_traces: dict[str, pd.DataFrame] with generator names as keys and
            time series dataframes as values (optional, required for residual metrics).

    Returns: A pd.DataFrame containing the columns 'investment_periods' (int) defining
        the investment period a modelled interval belongs to and 'snapshots' (datetime)
        defining each time interval modelled. 'investment_periods' are referred to by
        the year (financial or calendar) in which they begin.
    """
    if model_phase == "capacity_expansion":
        resolution_min = config.temporal.capacity_expansion.resolution_min
        aggregation = config.temporal.capacity_expansion.aggregation
        investment_periods = config.temporal.capacity_expansion.investment_periods
    else:
        resolution_min = config.temporal.operational.resolution_min
        aggregation = config.temporal.operational.aggregation
        investment_periods = config.temporal.capacity_expansion.investment_periods

    snapshots = _create_complete_snapshots_index(
        start_year=config.temporal.range.start_year,
        end_year=config.temporal.range.end_year,
        temporal_resolution_min=resolution_min,
        year_type=config.temporal.year_type,
    )

    snapshots = _filter_snapshots(
        config.temporal.year_type,
        config.temporal.range,
        aggregation,
        snapshots,
        existing_generators=existing_generators,
        demand_traces=demand_traces,
        generator_traces=generator_traces,
    )

    snapshots = _add_investment_periods(
        snapshots, investment_periods, config.temporal.year_type
    )

    return snapshots

ispypsa.translator.create_pypsa_friendly_timeseries_inputs

create_pypsa_friendly_timeseries_inputs(config: ModelConfig, model_phase: Literal['capacity_expansion', 'operational'], ispypsa_tables: dict[str, DataFrame], generators: DataFrame, parsed_traces_directory: Path, pypsa_friendly_timeseries_inputs_location: Path, snapshots: DataFrame | None = None) -> pd.DataFrame

Creates snapshots and timeseries data files in PyPSA friendly format for generation and demand.

  • First creates snapshots based on the temporal configuration, optionally using named_representative_weeks and/or representative_weeks if configured. If snapshots are provided, they are used instead of generating new ones.

  • a time series file is created for each wind and solar generator in the new_entrant_generators table (table in ispypsa_tables dict). The time series data is saved in parquet files in the 'solar_traces' and 'wind_traces' directories with the columns "snapshots" (datetime) and "p_max_pu" (float specifying availability in MW).

  • a time series file is created for each generator in the translated generators table (table in pypsa_inputs dict) containing the marginal costs for each generator in each snapshot. The time series data is saved in parquet files in the 'marginal_costs' directory with the columns "snapshots" (datetime) and "marginal_cost" (float specifying marginal cost in $/MWh).

  • a time series file is created for each model region specifying the load in that region (regions set by config.network.nodes.regional_granularity). The time series data is saved in parquet files in the 'demand_traces' directory with the columns "snapshots" (datetime) and "p_set" (float specifying load in MW).
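
The saved trace files can be inspected with pandas (a sketch; the directory and file name below are hypothetical and depend on your output location and model regions).

>>> import pandas as pd
>>> demand = pd.read_parquet("pypsa_friendly/timeseries/demand_traces/CNSW.parquet")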

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.config import load_config
>>> from ispypsa.data_fetch import read_csvs
>>> from ispypsa.translator import (
...     create_pypsa_friendly_inputs,
...     create_pypsa_friendly_timeseries_inputs
... )

Load config and ISPyPSA input tables.

>>> config = load_config(Path("ispypsa_config.yaml"))
>>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))

Create PyPSA-friendly inputs to get the generators table.

>>> pypsa_friendly_inputs = create_pypsa_friendly_inputs(config, ispypsa_tables)

Create timeseries inputs for capacity expansion modelling.

>>> snapshots = create_pypsa_friendly_timeseries_inputs(
...     config,
...     "capacity_expansion",
...     ispypsa_tables,
...     pypsa_friendly_inputs["generators"],
...     Path("parsed_traces"),
...     Path("pypsa_friendly/timeseries")
... )

Parameters:

  • config (ModelConfig) –

    ISPyPSA ModelConfig instance.

  • model_phase (Literal['capacity_expansion', 'operational']) –

    Either "capacity_expansion" or "operational". Determines which temporal config settings to use.

  • ispypsa_tables (dict[str, DataFrame]) –

    Dictionary of ISPyPSA input tables. Must contain ecaa_generators, new_entrant_generators, sub_regions tables, and fuel cost tables for fuel types present in the generator tables.

  • generators (DataFrame) –

    PyPSA-friendly generators DataFrame (from create_pypsa_friendly_inputs). Used for calculating dynamic marginal costs.

  • parsed_traces_directory (Path) –

    Path to trace data parsed using isp-trace-parser.

  • pypsa_friendly_timeseries_inputs_location (Path) –

    Path where timeseries data will be saved.

  • snapshots (DataFrame | None, default: None ) –

    Optional pre-defined snapshots DataFrame. If provided, must contain 'snapshots' (datetime) and 'investment_periods' (int) columns.

Returns:

  • DataFrame

    pd.DataFrame containing the snapshots used for the timeseries.

Source code in src\ispypsa\translator\create_pypsa_friendly.py
def create_pypsa_friendly_timeseries_inputs(
    config: ModelConfig,
    model_phase: Literal["capacity_expansion", "operational"],
    ispypsa_tables: dict[str, pd.DataFrame],
    generators: pd.DataFrame,
    parsed_traces_directory: Path,
    pypsa_friendly_timeseries_inputs_location: Path,
    snapshots: pd.DataFrame | None = None,
) -> pd.DataFrame:
    """Creates snapshots and timeseries data files in PyPSA friendly format for generation
    and demand.

    - First creates snapshots based on the temporal configuration, optionally using
      named_representative_weeks and/or representative_weeks if configured. If snapshots
      are provided, they are used instead of generating new ones.

    - a time series file is created for each wind and solar generator in the new_entrant_generators
    table (table in ispypsa_tables dict). The time series data is saved in parquet files
    in the 'solar_traces' and 'wind_traces' directories with the columns "snapshots"
    (datetime) and "p_max_pu" (float specifying availability in MW).

    - a time series file is created for each generator in the translated generators table
    (table in pypsa_inputs dict) containing the marginal costs for each generator in each
    snapshot. The time series data is saved in parquet files in the 'marginal_costs' directory
    with the columns "snapshots" (datetime) and "marginal_cost" (float specifying marginal
    cost in $/MWh).

    - a time series file is created for each model region specifying the load in that
    region (regions set by config.network.nodes.regional_granularity). The time series
    data is saved in parquet files in the 'demand_traces' directory with the columns
    "snapshots" (datetime) and "p_set" (float specifying load in MW).

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.config import load_config
        >>> from ispypsa.data_fetch import read_csvs
        >>> from ispypsa.translator import (
        ...     create_pypsa_friendly_inputs,
        ...     create_pypsa_friendly_timeseries_inputs
        ... )

        Load config and ISPyPSA input tables.
        >>> config = load_config(Path("ispypsa_config.yaml"))
        >>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))

        Create PyPSA-friendly inputs to get the generators table.
        >>> pypsa_friendly_inputs = create_pypsa_friendly_inputs(config, ispypsa_tables)

        Create timeseries inputs for capacity expansion modelling.
        >>> snapshots = create_pypsa_friendly_timeseries_inputs(
        ...     config,
        ...     "capacity_expansion",
        ...     ispypsa_tables,
        ...     pypsa_friendly_inputs["generators"],
        ...     Path("parsed_traces"),
        ...     Path("pypsa_friendly/timeseries")
        ... )

    Args:
        config: ISPyPSA ModelConfig instance.
        model_phase: Either "capacity_expansion" or "operational". Determines which
            temporal config settings to use.
        ispypsa_tables: Dictionary of ISPyPSA input tables. Must contain
            ecaa_generators, new_entrant_generators, sub_regions tables, and fuel
            cost tables for fuel types present in the generator tables.
        generators: PyPSA-friendly generators DataFrame (from create_pypsa_friendly_inputs).
            Used for calculating dynamic marginal costs.
        parsed_traces_directory: Path to trace data parsed using isp-trace-parser.
        pypsa_friendly_timeseries_inputs_location: Path where timeseries data will be saved.
        snapshots: Optional pre-defined snapshots DataFrame. If provided, must contain
            'snapshots' (datetime) and 'investment_periods' (int) columns.

    Returns:
        pd.DataFrame containing the snapshots used for the timeseries.
    """

    if model_phase == "capacity_expansion":
        reference_year_cycle = config.temporal.capacity_expansion.reference_year_cycle
    else:
        reference_year_cycle = config.temporal.operational.reference_year_cycle

    reference_year_mapping = construct_reference_year_mapping(
        start_year=config.temporal.range.start_year,
        end_year=config.temporal.range.end_year,
        reference_years=reference_year_cycle,
    )

    # Load generator timeseries data (organized by type)
    generator_traces_by_type = create_pypsa_friendly_ecaa_generator_timeseries(
        ispypsa_tables["ecaa_generators"],
        parsed_traces_directory,
        generator_types=["solar", "wind"],
        reference_year_mapping=reference_year_mapping,
        year_type=config.temporal.year_type,
    )

    # Load demand timeseries data
    demand_traces = create_pypsa_friendly_bus_demand_timeseries(
        ispypsa_tables["sub_regions"],
        parsed_traces_directory,
        scenario=config.scenario,
        regional_granularity=config.network.nodes.regional_granularity,
        reference_year_mapping=reference_year_mapping,
        year_type=config.temporal.year_type,
    )

    # Use provided snapshots or create new ones
    if snapshots is None:
        # Create snapshots, potentially using the loaded data for named_representative_weeks
        # Flatten generator traces for snapshot creation
        all_generator_traces = _flatten_generator_traces(generator_traces_by_type)

        snapshots = create_pypsa_friendly_snapshots(
            config,
            model_phase,
            existing_generators=ispypsa_tables.get("ecaa_generators"),
            demand_traces=demand_traces,
            generator_traces=all_generator_traces,
        )

    if generator_traces_by_type is not None:
        # Filter and save generator timeseries by type
        for gen_type, gen_traces in generator_traces_by_type.items():
            if gen_traces:
                _filter_and_save_timeseries(
                    gen_traces,
                    snapshots,
                    pypsa_friendly_timeseries_inputs_location,
                    f"{gen_type}_traces",
                )

    # Filter and save demand timeseries
    _filter_and_save_timeseries(
        demand_traces,
        snapshots,
        pypsa_friendly_timeseries_inputs_location,
        "demand_traces",
    )

    create_pypsa_friendly_new_entrant_generator_timeseries(
        ispypsa_tables["new_entrant_generators"],
        parsed_traces_directory,
        pypsa_friendly_timeseries_inputs_location,
        generator_types=["solar", "wind"],
        reference_year_mapping=reference_year_mapping,
        year_type=config.temporal.year_type,
        snapshots=snapshots,
    )

    # This is needed because numbers can be converted to strings when the data has
    # been round-tripped through a CSV file.
    generators = convert_to_numeric_if_possible(generators, cols=["marginal_cost"])
    # NOTE - maybe this function needs to live somewhere separate / be handled a
    # little differently, because it currently requires the translated generator
    # table as input.
    create_pypsa_friendly_dynamic_marginal_costs(
        ispypsa_tables,
        generators,
        snapshots,
        pypsa_friendly_timeseries_inputs_location,
    )

    snapshots = _add_snapshot_weightings(
        snapshots, config.temporal.capacity_expansion.resolution_min
    )

    return snapshots
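
For orientation, below is a minimal sketch of calling this function directly, assuming the config, ISPyPSA input tables, and translated PyPSA-friendly tables have already been created as in the default workflow; all paths shown are illustrative.

>>> from pathlib import Path
>>> from ispypsa.config import load_config
>>> from ispypsa.data_fetch import read_csvs
>>> from ispypsa.translator import create_pypsa_friendly_timeseries_inputs

>>> config = load_config(Path("ispypsa_config.yaml"))
>>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))
>>> pypsa_friendly_tables = read_csvs(Path("pypsa_friendly"))

>>> snapshots = create_pypsa_friendly_timeseries_inputs(
...     config=config,
...     model_phase="capacity_expansion",
...     ispypsa_tables=ispypsa_tables,
...     generators=pypsa_friendly_tables["generators"],
...     parsed_traces_directory=Path("parsed_traces"),
...     pypsa_friendly_timeseries_inputs_location=Path(
...         "pypsa_friendly/capacity_expansion_timeseries"
...     ),
... )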

Model Building & Execution

ispypsa.pypsa_build.build_pypsa_network

build_pypsa_network(pypsa_friendly_tables: dict[str, DataFrame], path_to_pypsa_friendly_timeseries_data: Path)

Creates a pypsa.Network based on a set of PyPSA-friendly input tables.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs, write_csvs
>>> from ispypsa.pypsa_build import build_pypsa_network

Read in PyPSA friendly tables from CSV.

>>> pypsa_input_tables = read_csvs(Path("pypsa_friendly_inputs_directory"))
>>> network = build_pypsa_network(
... pypsa_friendly_tables=pypsa_input_tables,
... path_to_pypsa_friendly_timeseries_data=Path("pypsa_friendly_timeseries_data")
... )

Then the model can be run in PyPSA.

>>> network.optimize.solve_model(solver_name="highs")

And the results saved to disk.

>>> network.export_to_netcdf(Path("model_results.nc"))

Parameters:

  • pypsa_friendly_tables (dict[str, DataFrame]) –

    Dictionary of DataFrames in the PyPSA-friendly format. (add link to pypsa friendly format table docs)

  • path_to_pypsa_friendly_timeseries_data (Path) –

    Path to PyPSA-friendly time series data (add link to timeseries data docs).

Returns:

  • pypsa.Network: A PyPSA network object ready for optimisation.

Source code in src\ispypsa\pypsa_build\build.py
def build_pypsa_network(
    pypsa_friendly_tables: dict[str, pd.DataFrame],
    path_to_pypsa_friendly_timeseries_data: Path,
):
    """Creates a `pypsa.Network` based on set of pypsa friendly input tables.

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.data_fetch import read_csvs, write_csvs
        >>> from ispypsa.pypsa_build import build_pypsa_network

        Read in PyPSA friendly tables from CSV.
        >>> pypsa_input_tables = read_csvs(Path("pypsa_friendly_inputs_directory"))

        >>> network = build_pypsa_network(
        ... pypsa_friendly_tables=pypsa_input_tables,
        ... path_to_pypsa_friendly_timeseries_data=Path("pypsa_friendly_timeseries_data")
        ... )

        Then the model can be run in PyPSA.
        >>> network.optimize.solve_model(solver_name="highs")

        And the results saved to disk.
        >>> network.export_to_netcdf(Path("model_results.nc"))

    Args:
        pypsa_friendly_tables: Dictionary of DataFrames in the `PyPSA`-friendly format.
            (add link to pypsa friendly format table docs)
        path_to_pypsa_friendly_timeseries_data: `Path` to `PyPSA`-friendly time series
            data (add link to timeseries data docs).

    Returns:
        pypsa.Network: A PyPSA network object ready for optimisation.
    """
    network = _initialise_network(pypsa_friendly_tables["snapshots"])

    _add_investment_period_weights(
        network, pypsa_friendly_tables["investment_period_weights"]
    )

    _add_carriers_to_network(
        network,
        pypsa_friendly_tables.get("generators"),
        pypsa_friendly_tables.get("batteries"),
    )

    _add_buses_to_network(
        network, pypsa_friendly_tables["buses"], path_to_pypsa_friendly_timeseries_data
    )

    if "links" in pypsa_friendly_tables.keys():
        _add_links_to_network(network, pypsa_friendly_tables["links"])

    _add_generators_to_network(
        network,
        pypsa_friendly_tables["generators"],
        path_to_pypsa_friendly_timeseries_data,
    )

    if "batteries" in pypsa_friendly_tables.keys():
        _add_batteries_to_network(network, pypsa_friendly_tables["batteries"])

    if "custom_constraints_generators" in pypsa_friendly_tables.keys():
        _add_bus_for_custom_constraints(network)

        _add_custom_constraint_generators_to_network(
            network, pypsa_friendly_tables["custom_constraints_generators"]
        )

    # The underlying linopy model needs to get built so we can add custom constraints.
    network.optimize.create_model(multi_investment_periods=True)

    if "custom_constraints_rhs" in pypsa_friendly_tables:
        _add_custom_constraints(
            network,
            pypsa_friendly_tables["custom_constraints_rhs"],
            pypsa_friendly_tables["custom_constraints_lhs"],
        )

    return network

ispypsa.pypsa_build.update_network_timeseries

update_network_timeseries(network: Network, pypsa_friendly_input_tables: dict[str, DataFrame], snapshots: DataFrame, pypsa_friendly_timeseries_location: Path) -> None

Update the time series data in a pypsa.Network instance.

Designed to help convert capacity expansion network models into operational models but may also be useful in other circumstances, such as when running a capacity expansion model with different reference year cycles.

Examples:

>>> import pandas as pd
>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs
>>> from ispypsa.pypsa_build import update_network_timeseries

Get PyPSA-friendly inputs (in particular, these need to contain the generators and buses tables).

>>> pypsa_friendly_input_tables = read_csvs("path/to/pypsa/friendly/inputs")

Get the snapshots for the updated time series data.

>>> snapshots = pd.read_csv("new_snapshots.csv")

Get the pypsa.Network we want to update the time series data in.

>>> network = pypsa.Network()
>>> network.import_from_netcdf("existing_network.nc")

Update the time series data in the network.

>>> update_network_timeseries(
...     network,
...     pypsa_friendly_input_tables,
...     snapshots,
...     Path("path/to/time/series/data/files")
... )

Parameters:

  • network (Network) –

    pypsa.Network with a set of generators, loads, and buses consistent with the updated time series data, i.e. if generator 'Y' exists in the existing network it also needs to exist in the updated time series data.

  • pypsa_friendly_input_tables (dict[str, DataFrame]) –

    Dictionary of DataFrames in the PyPSA-friendly format. (add link to pypsa friendly format table docs)

  • snapshots (DataFrame) –

    a pd.DataFrame containing the columns 'investment_periods' (int), defining the investment period each modelled interval belongs to, and 'snapshots' (datetime), defining each time interval modelled. Investment periods are referred to by the year (financial or calendar) in which they begin.

  • pypsa_friendly_timeseries_location (Path) –

    Path to PyPSA friendly time series data (add link to timeseries data docs).

Returns: None
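
If you need to construct the snapshots table in code rather than reading it from a CSV, here is a minimal sketch of the expected shape (the dates below are illustrative only):

>>> import pandas as pd
>>> snapshots = pd.DataFrame({
...     "investment_periods": [2025, 2025],
...     "snapshots": pd.to_datetime(["2025-01-01 00:30:00", "2025-01-01 01:00:00"]),
... })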

Source code in src\ispypsa\pypsa_build\update.py
def update_network_timeseries(
    network: pypsa.Network,
    pypsa_friendly_input_tables: dict[str, pd.DataFrame],
    snapshots: pd.DataFrame,
    pypsa_friendly_timeseries_location: Path,
) -> None:
    """
    Update the time series data in a pypsa.Network instance.

    Designed to help convert capacity expansion network models into operational models
    but may also be useful in other circumstances, such as when running a capacity
    expansion model with different reference year cycles.

    Examples:
        >>> import pandas as pd
        >>> from pathlib import Path
        >>> from ispypsa.data_fetch import read_csvs
        >>> from ispypsa.pypsa_build import update_network_timeseries

        Get PyPSA-friendly inputs (in particular, these need to contain the generators
        and buses tables).

        >>> pypsa_friendly_input_tables = read_csvs("path/to/pypsa/friendly/inputs")

        Get the snapshots for the updated time series data.

        >>> snapshots = pd.read_csv("new_snapshots.csv")

        Get the pypsa.Network we want to update the time series data in.

        >>> network = pypsa.Network()
        >>> network.import_from_netcdf("existing_network.nc")

        Update the time series data in the network.

        >>> update_network_timeseries(
        ...     network,
        ...     pypsa_friendly_input_tables,
        ...     snapshots,
        ...     Path("path/to/time/series/data/files")
        ... )

    Args:
        network: pypsa.Network with a set of generators, loads, and buses consistent
            with the updated time series data, i.e. if generator 'Y' exists in the
            existing network it also needs to exist in the updated time series data.
        pypsa_friendly_input_tables: Dictionary of DataFrames in the `PyPSA`-friendly
            format. (add link to pypsa friendly format table docs)
        snapshots: a pd.DataFrame containing the columns 'investment_periods' (int),
            defining the investment period each modelled interval belongs to, and
            'snapshots' (datetime), defining each time interval modelled. Investment
            periods are referred to by the year (financial or calendar) in which
            they begin.
        pypsa_friendly_timeseries_location: `Path` to `PyPSA`-friendly time series
            data (add link to timeseries data docs).

    Returns: None
    """
    snapshots["snapshots"] = pd.to_datetime(snapshots["snapshots"])
    snapshots_as_indexes = pd.MultiIndex.from_arrays(
        [snapshots["investment_periods"], snapshots["snapshots"]]
    )
    network.snapshots = snapshots_as_indexes
    network.set_investment_periods(snapshots["investment_periods"].unique())
    _update_generators_availability_timeseries(
        network,
        pypsa_friendly_input_tables["generators"],
        pypsa_friendly_timeseries_location,
    )
    _update_buses_demand_timeseries(
        network,
        pypsa_friendly_input_tables["buses"],
        pypsa_friendly_timeseries_location,
    )

    # The underlying linopy model needs to get built again here so that the new time
    # series data is used in the linopy model rather than the old data.
    network.optimize.create_model()

    # As we rebuilt the linopy model, we now need to re-add the custom constraints.
    _add_custom_constraints(
        network,
        pypsa_friendly_input_tables["custom_constraints_rhs"],
        pypsa_friendly_input_tables["custom_constraints_lhs"],
    )
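
Putting this together, here is a sketch of re-purposing a solved capacity expansion network as an operational model; the file and directory names are illustrative, and the operational snapshots table is assumed to have been prepared beforehand.

>>> import pandas as pd
>>> import pypsa
>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs
>>> from ispypsa.pypsa_build import update_network_timeseries

>>> network = pypsa.Network()
>>> network.import_from_netcdf("outputs/capacity_expansion.nc")
>>> pypsa_friendly_input_tables = read_csvs(Path("pypsa_friendly"))
>>> snapshots = pd.read_csv("operational_snapshots.csv")
>>> update_network_timeseries(
...     network,
...     pypsa_friendly_input_tables,
...     snapshots,
...     Path("pypsa_friendly/operational_timeseries"),
... )
>>> network.optimize.solve_model(solver_name="highs")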

ispypsa.pypsa_build.save_pypsa_network

save_pypsa_network(network: Network, save_directory: Path, save_name: str) -> None

Save the optimised PyPSA network as a NetCDF file.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.pypsa_build import save_pypsa_network

After running the model optimisation, save the network.

>>> network.optimize.solve_model(solver_name="highs")
>>> save_pypsa_network(
...     network,
...     save_directory=Path("outputs"),
...     save_name="capacity_expansion"
... )
# Saves to outputs/capacity_expansion.nc

Save operational model results.

>>> save_pypsa_network(
...     network,
...     save_directory=Path("outputs"),
...     save_name="operational"
... )
# Saves to outputs/operational.nc

Parameters:

  • network (Network) –

    The solved PyPSA network object.

  • save_directory (Path) –

    Directory where the network file should be saved.

  • save_name (str) –

    Name for the saved file (without .nc extension).

Returns:

  • None

    None

Source code in src\ispypsa\pypsa_build\save.py
def save_pypsa_network(
    network: pypsa.Network, save_directory: Path, save_name: str
) -> None:
    """Save the optimised PyPSA network as a NetCDF file.

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.pypsa_build import save_pypsa_network

        After running the model optimisation, save the network.
        >>> network.optimize.solve_model(solver_name="highs")
        >>> save_pypsa_network(
        ...     network,
        ...     save_directory=Path("outputs"),
        ...     save_name="capacity_expansion"
        ... )
        # Saves to outputs/capacity_expansion.nc

        Save operational model results.
        >>> save_pypsa_network(
        ...     network,
        ...     save_directory=Path("outputs"),
        ...     save_name="operational"
        ... )
        # Saves to outputs/operational.nc

    Args:
        network: The solved PyPSA network object.
        save_directory: Directory where the network file should be saved.
        save_name: Name for the saved file (without .nc extension).

    Returns:
        None
    """
    network.export_to_netcdf(Path(save_directory, f"{save_name}.nc"))
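
A saved network can later be reloaded for further analysis, for example:

>>> import pypsa
>>> network = pypsa.Network()
>>> network.import_from_netcdf("outputs/capacity_expansion.nc")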

Tabular Results Extraction

ispypsa.results.extract_tabular_results

extract_tabular_results(network: Network, ispypsa_tables: dict[str, DataFrame]) -> dict[str, DataFrame]

Extract the results from the PyPSA network and return a dictionary of results.

Extracts generation expansion, transmission expansion, dispatch, demand, and transmission flow results from the solved PyPSA network.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs, write_csvs
>>> from ispypsa.results import extract_tabular_results

Load ISPyPSA input tables (needed for regions mapping).

>>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))

After solving the network, extract the results.

>>> network.optimize.solve_model(solver_name="highs")
>>> results = extract_tabular_results(network, ispypsa_tables)

Access specific result tables.

>>> generation_expansion = results["generation_expansion"]
>>> transmission_flows = results["transmission_flows"]

Write results to CSV files.

>>> write_csvs(results, Path("outputs/results"))

Parameters:

  • network (Network) –

    The PyPSA network object.

  • ispypsa_tables (dict[str, DataFrame]) –

    Dictionary of ISPyPSA input tables (needed for regions mapping).

Returns:

  • dict[str, DataFrame]

    A dictionary of results with the file name as the key and the results as the value.
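
Since the dictionary keys double as file names (as used by write_csvs), you can quickly inspect what was extracted; a small sketch:

>>> for name, table in results.items():
...     print(name, table.shape)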

Source code in src\ispypsa\results\extract.py
def extract_tabular_results(
    network: pypsa.Network,
    ispypsa_tables: dict[str, pd.DataFrame],
) -> dict[str, pd.DataFrame]:
    """Extract the results from the PyPSA network and return a dictionary of results.

    Extracts generation expansion, transmission expansion, dispatch, demand, and
    transmission flow results from the solved PyPSA network.

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.data_fetch import read_csvs, write_csvs
        >>> from ispypsa.results import extract_tabular_results

        Load ISPyPSA input tables (needed for regions mapping).
        >>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))

        After solving the network, extract the results.
        >>> network.optimize.solve_model(solver_name="highs")
        >>> results = extract_tabular_results(network, ispypsa_tables)

        Access specific result tables.
        >>> generation_expansion = results["generation_expansion"]
        >>> transmission_flows = results["transmission_flows"]

        Write results to CSV files.
        >>> write_csvs(results, Path("outputs/results"))

    Args:
        network: The PyPSA network object.
        ispypsa_tables: Dictionary of ISPyPSA input tables (needed for regions mapping).

    Returns:
        A dictionary of results with the file name as the key and the results as the value.
    """

    # Functions that require link_flows and regions_mapping parameters
    geographic_transmission_functions = {
        "rez_transmission_flows",
        "isp_sub_region_transmission_flows",
        "nem_region_transmission_flows",
    }

    results = {}

    # Extract regions and zones mapping to be used in other functions that require it.
    results["regions_and_zones_mapping"] = extract_regions_and_zones_mapping(
        ispypsa_tables
    )

    # Extract first transmission flows to be used in other functions that require it.
    results["transmission_flows"] = extract_transmission_flows(network)

    for file, function in RESULTS_FILES.items():
        if file in ["transmission_flows", "regions_and_zones_mapping"]:
            continue

        if file in geographic_transmission_functions:
            results[file] = function(
                results["transmission_flows"], results["regions_and_zones_mapping"]
            )
        else:
            results[file] = function(network)

    return results

Plotting

ispypsa.plotting.create_plot_suite

create_plot_suite(results: dict[str, DataFrame]) -> dict[Path, dict]

Create a suite of plots for ISPyPSA modelling results.

Works for both capacity expansion and operational model results.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs
>>> from ispypsa.results import extract_tabular_results
>>> from ispypsa.plotting import create_plot_suite, save_plots

Extract tabular results from the solved network.

>>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))
>>> results = extract_tabular_results(network, ispypsa_tables)

Create the plot suite from the results.

>>> plots = create_plot_suite(results)

Save the plots to disk.

>>> save_plots(plots, Path("outputs/plots"))

Parameters:

  • results (dict[str, DataFrame]) –

    A dictionary of tabular results from the ISPyPSA model. Should contain:

      - transmission_expansion
      - transmission_flows
      - nem_region_transmission_flows
      - isp_sub_region_transmission_flows
      - regions_and_zones_mapping
      - generator_dispatch
      - generation_expansion
      - demand

Returns:

  • dict[Path, dict]

    A dictionary of plots with the file path as the key and the plot as the value.
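
Before creating plots it can be worth checking that the results dictionary contains the tables listed above; a minimal sketch (note from the source below that the two regional flow tables are looked up with .get and fall back to empty DataFrames, so they are strictly optional):

>>> required = {
...     "transmission_expansion", "transmission_flows",
...     "regions_and_zones_mapping", "generator_dispatch",
...     "generation_expansion", "demand",
... }
>>> missing = required - results.keys()
>>> if missing:
...     raise KeyError(f"Missing result tables: {sorted(missing)}")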

Source code in src\ispypsa\plotting\plot.py
def create_plot_suite(
    results: dict[str, pd.DataFrame],
) -> dict[Path, dict]:
    """Create a suite of plots for ISPyPSA modelling results.

    Works for both capacity expansion and operational model results.

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.data_fetch import read_csvs
        >>> from ispypsa.results import extract_tabular_results
        >>> from ispypsa.plotting import create_plot_suite, save_plots

        Extract tabular results from the solved network.
        >>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))
        >>> results = extract_tabular_results(network, ispypsa_tables)

        Create the plot suite from the results.
        >>> plots = create_plot_suite(results)

        Save the plots to disk.
        >>> save_plots(plots, Path("outputs/plots"))

    Args:
        results: A dictionary of tabular results from the ISPyPSA model. Should contain:
            - transmission_expansion
            - transmission_flows
            - nem_region_transmission_flows
            - isp_sub_region_transmission_flows
            - regions_and_zones_mapping
            - generator_dispatch
            - generation_expansion
            - demand

    Returns:
        A dictionary of plots with the file path as the key and the plot as the value.
    """
    nem_region_flows = results.get("nem_region_transmission_flows", pd.DataFrame())
    isp_sub_region_flows = results.get(
        "isp_sub_region_transmission_flows", pd.DataFrame()
    )

    plots = {
        "transmission": {
            "aggregate_capacity": plot_aggregate_transmission_capacity(
                results["transmission_expansion"], results["regions_and_zones_mapping"]
            ),
            "flows": plot_flows(
                results["transmission_flows"], results["transmission_expansion"]
            ),
            "regional_expansion": plot_regional_capacity_expansion(
                results["transmission_expansion"], results["regions_and_zones_mapping"]
            ),
        },
        "generation": plot_generation_capacity_expansion(
            results["generation_expansion"], results["regions_and_zones_mapping"]
        ),
        "dispatch": {
            "system": plot_dispatch(
                results["generator_dispatch"],
                results["demand"],
            ),
            "regional": plot_dispatch(
                results["generator_dispatch"],
                results["demand"],
                results["regions_and_zones_mapping"],
                "nem_region_id",
                nem_region_flows,
            ),
            "sub_regional": plot_dispatch(
                results["generator_dispatch"],
                results["demand"],
                results["regions_and_zones_mapping"],
                "isp_sub_region_id",
                isp_sub_region_flows,
            ),
        },
    }
    return flatten_dict_with_file_paths_as_keys(plots)

ispypsa.plotting.save_plots

save_plots(charts: dict[Path, dict], base_path: Path) -> None

Save a suite of Plotly plots and their underlying data to the plots directory.

All plots are saved as interactive HTML files with accompanying CSV data files.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.plotting import create_plot_suite, save_plots

Create plots from results (see create_plot_suite for how to get results).

>>> plots = create_plot_suite(results)

Save all plots to a directory.

>>> save_plots(plots, Path("outputs/capacity_expansion_plots"))
# Creates HTML files like:
# outputs/capacity_expansion_plots/transmission/aggregate_capacity.html
# outputs/capacity_expansion_plots/transmission/aggregate_capacity.csv
# outputs/capacity_expansion_plots/generation.html
# outputs/capacity_expansion_plots/generation.csv

Parameters:

  • charts (dict[Path, dict]) –

    A dictionary with file paths as keys and dicts with "plot" and "data" as values.

  • base_path (Path) –

    The path to the directory where the plots are saved.

Returns:

  • None

    None

Source code in src\ispypsa\plotting\plot.py
def save_plots(charts: dict[Path, dict], base_path: Path) -> None:
    """Save a suite of Plotly plots and their underlying data to the plots directory.

    All plots are saved as interactive HTML files with accompanying CSV data files.

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.plotting import create_plot_suite, save_plots

        Create plots from results (see `create_plot_suite` for how to get results).
        >>> plots = create_plot_suite(results)

        Save all plots to a directory.
        >>> save_plots(plots, Path("outputs/capacity_expansion_plots"))
        # Creates HTML files like:
        # outputs/capacity_expansion_plots/transmission/aggregate_capacity.html
        # outputs/capacity_expansion_plots/transmission/aggregate_capacity.csv
        # outputs/capacity_expansion_plots/generation.html
        # outputs/capacity_expansion_plots/generation.csv

    Args:
        charts: A dictionary with file paths as keys and dicts with "plot" and "data" as values.
        base_path: The path to the directory where the plots are saved.

    Returns:
        None
    """
    for path, content in charts.items():
        plot = content["plot"]

        # Save Plotly chart as HTML
        html_path = base_path / path
        html_path.parent.mkdir(parents=True, exist_ok=True)

        # Save the underlying data (CSV)
        csv_path = html_path.with_suffix(".csv")
        content["data"].to_csv(csv_path, index=False)

        # Save the plot (HTML) with responsive sizing
        plot.write_html(
            html_path,
            full_html=True,
            include_plotlyjs=True,
            config={"responsive": True},
        )

ispypsa.plotting.generate_results_website

generate_results_website(plots: Dict[Path, dict], plots_dir: Path, output_dir: Path, site_name: str = 'ISPyPSA Results', output_filename: str = 'results_viewer.html', subtitle: str = 'Capacity Expansion Analysis', regions_and_zones_mapping: Optional[DataFrame] = None) -> None

Generate a static website for navigating ISPyPSA plot results.

Creates a single HTML file with a navigation pane that mirrors the directory structure of the plots.

Examples:

Perform required imports.

>>> from pathlib import Path
>>> from ispypsa.data_fetch import read_csvs
>>> from ispypsa.results import extract_tabular_results
>>> from ispypsa.plotting import (
...     create_plot_suite,
...     save_plots,
...     generate_results_website
... )

Extract results and create plots.

>>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))
>>> results = extract_tabular_results(network, ispypsa_tables)
>>> plots = create_plot_suite(results)

Save plots to disk first.

>>> plots_dir = Path("outputs/capacity_expansion_plots")
>>> save_plots(plots, plots_dir)

Generate the results website.

>>> generate_results_website(
...     plots,
...     plots_dir,
...     output_dir=Path("outputs"),
...     output_filename="capacity_expansion_results_viewer.html",
...     subtitle="Capacity Expansion Analysis",
...     regions_and_zones_mapping=results["regions_and_zones_mapping"]
... )
# Creates outputs/capacity_expansion_results_viewer.html

Parameters:

  • plots (Dict[Path, dict]) –

    Dictionary with Path keys (plot file paths) and dict values containing "plot" and "data" keys (output from create_*_plot_suite)

  • plots_dir (Path) –

    Directory where plots are saved (e.g., outputs/capacity_expansion_plots)

  • output_dir (Path) –

    Directory where the website HTML should be saved (e.g., outputs directory)

  • site_name (str, default: 'ISPyPSA Results' ) –

    Name of the website (default: "ISPyPSA Results")

  • output_filename (str, default: 'results_viewer.html' ) –

    Name of the output HTML file (default: "results_viewer.html")

  • subtitle (str, default: 'Capacity Expansion Analysis' ) –

    Subtitle to display in the header (default: "Capacity Expansion Analysis")

  • regions_and_zones_mapping (Optional[DataFrame], default: None ) –

    Optional mapping table to ensure correct capitalization of region and zone IDs

Returns:

  • None

    None. The website is saved as output_filename in output_dir.

Source code in src\ispypsa\plotting\website.py
def generate_results_website(
    plots: Dict[Path, dict],
    plots_dir: Path,
    output_dir: Path,
    site_name: str = "ISPyPSA Results",
    output_filename: str = "results_viewer.html",
    subtitle: str = "Capacity Expansion Analysis",
    regions_and_zones_mapping: Optional[pd.DataFrame] = None,
) -> None:
    """Generate a static website for navigating ISPyPSA plot results.

    Creates a single HTML file with a navigation pane that mirrors
    the directory structure of the plots.

    Examples:
        Perform required imports.
        >>> from pathlib import Path
        >>> from ispypsa.data_fetch import read_csvs
        >>> from ispypsa.results import extract_tabular_results
        >>> from ispypsa.plotting import (
        ...     create_plot_suite,
        ...     save_plots,
        ...     generate_results_website
        ... )

        Extract results and create plots.
        >>> ispypsa_tables = read_csvs(Path("ispypsa_inputs"))
        >>> results = extract_tabular_results(network, ispypsa_tables)
        >>> plots = create_plot_suite(results)

        Save plots to disk first.
        >>> plots_dir = Path("outputs/capacity_expansion_plots")
        >>> save_plots(plots, plots_dir)

        Generate the results website.
        >>> generate_results_website(
        ...     plots,
        ...     plots_dir,
        ...     output_dir=Path("outputs"),
        ...     output_filename="capacity_expansion_results_viewer.html",
        ...     subtitle="Capacity Expansion Analysis",
        ...     regions_and_zones_mapping=results["regions_and_zones_mapping"]
        ... )
        # Creates outputs/capacity_expansion_results_viewer.html

    Args:
        plots: Dictionary with Path keys (plot file paths) and dict values
               containing "plot" and "data" keys (output from create_*_plot_suite)
        plots_dir: Directory where plots are saved (e.g., outputs/capacity_expansion_plots)
        output_dir: Directory where the website HTML should be saved (e.g., outputs directory)
        site_name: Name of the website (default: "ISPyPSA Results")
        output_filename: Name of the output HTML file (default: "results_viewer.html")
        subtitle: Subtitle to display in the header (default: "Capacity Expansion Analysis")
        regions_and_zones_mapping: Optional mapping table to ensure correct capitalization
            of region and zone IDs

    Returns:
        None. The website is saved as output_filename in output_dir.
    """
    logging.info(f"Generating results website: {output_filename}...")

    # Get list of plot paths
    plot_paths = list(plots.keys())

    if not plot_paths:
        logging.warning("No plots found to generate website")
        return

    # Derive plot directory name from the plots_dir path
    plot_dir_name = plots_dir.name

    # Build plot tree structure
    plot_tree = _build_plot_tree(plot_paths)

    # Build known IDs list if mapping is provided
    known_ids_list = []
    if regions_and_zones_mapping is not None:
        known_ids = set()
        for col in ["nem_region_id", "isp_sub_region_id", "rez_id"]:
            if col in regions_and_zones_mapping.columns:
                ids = regions_and_zones_mapping[col].dropna().astype(str).unique()
                for id_val in ids:
                    # Standard version with spaces (matches text where underscores were replaced)
                    known_ids.add(id_val.replace("_", " ").upper())
                    # Hyphenated version (matches text with hyphens, e.g. SEQ-1)
                    known_ids.add(id_val.replace("_", "-").upper())

        # Add always capitalized words
        known_ids.update(ALWAYS_CAPITALIZED_WORDS)

        # Convert to sorted list (by length descending to handle overlapping IDs correctly)
        known_ids_list = sorted(known_ids, key=len, reverse=True)
    else:
        # If no mapping provided, at least use the hardcoded list
        known_ids_list = sorted(ALWAYS_CAPITALIZED_WORDS, key=len, reverse=True)

    # Convert tree to HTML
    plot_tree_html = _tree_to_html(plot_tree, known_ids=known_ids_list)

    # Generate complete HTML
    html_content = _generate_html_template(
        plot_tree_html, plot_dir_name, site_name, subtitle
    )

    # Write HTML file
    output_file = output_dir / output_filename
    output_file.write_text(html_content, encoding="utf-8")

    logging.info(f"Website generated successfully: {output_file}")
    logging.info(f"Open {output_file.name} in a browser to view the results")