From 290da2b13a2e749a82360e6d9a2e37e70c6e6900 Mon Sep 17 00:00:00 2001
From: Jimmy Tung
Date: Wed, 9 Jul 2025 14:48:58 +1000
Subject: [PATCH 1/3] Move changelog to proper section

Signed-off-by: Jimmy Tung
---
 changelog.md | 3 +-
 src/zepben/eas/client/eas_client.py | 195 +++++++++++-------
 .../eas/client/feeder_load_analysis_input.py | 55 +++++
 src/zepben/eas/client/work_package.py | 44 ++++
 4 files changed, 227 insertions(+), 70 deletions(-)
 create mode 100644 src/zepben/eas/client/feeder_load_analysis_input.py

diff --git a/changelog.md b/changelog.md
index ebc04fd..55761c5 100644
--- a/changelog.md
+++ b/changelog.md
@@ -20,7 +20,8 @@
 * `FixedTimeLoadOverride` now takes in optional list of floats instead of optional float for its variable.
 
 ### New Features
-* None.
+* Added `FeederLoadAnalysisInput`, which holds the config for feeder load analysis studies.
+  * These can then be used in the `run_feeder_load_analysis_report` and `async_run_feeder_load_analysis_report` functions.
 
 ### Enhancements
 * None.
diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py
index a2337c9..8573559 100644
--- a/src/zepben/eas/client/eas_client.py
+++ b/src/zepben/eas/client/eas_client.py
@@ -16,6 +16,7 @@ from urllib3.exceptions import InsecureRequestWarning
 
 from zepben.auth import AuthMethod, ZepbenTokenFetcher, create_token_fetcher, create_token_fetcher_managed_identity
+from zepben.eas.client.feeder_load_analysis_input import FeederLoadAnalysisInput
 from zepben.eas.client.opendss import OpenDssConfig, GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput
 from zepben.eas.client.study import Study
 from zepben.eas.client.util import construct_url
@@ -30,20 +31,20 @@ class EasClient:
     """
 
     def __init__(
-        self,
-        host: str,
-        port: int,
-        protocol: str = "https",
-        client_id: Optional[str] = None,
-        username: Optional[str] = None,
-        password: Optional[str] = None,
-        access_token: Optional[str] = None,
-        client_secret: Optional[str] = None,
-        token_fetcher: Optional[ZepbenTokenFetcher] = None,
-        verify_certificate: bool = True,
-        ca_filename: Optional[str] = None,
-        session: ClientSession = None,
-        json_serialiser=None
+            self,
+            host: str,
+            port: int,
+            protocol: str = "https",
+            client_id: Optional[str] = None,
+            username: Optional[str] = None,
+            password: Optional[str] = None,
+            access_token: Optional[str] = None,
+            client_secret: Optional[str] = None,
+            token_fetcher: Optional[ZepbenTokenFetcher] = None,
+            verify_certificate: bool = True,
+            ca_filename: Optional[str] = None,
+            session: ClientSession = None,
+            json_serialiser=None
     ):
         """
         Construct a client for the Evolve App Server. If the server is HTTPS, authentication may be configured.
@@ -414,10 +415,10 @@ async def async_get_work_package_cost_estimation(self, work_package: WorkPackage
             sslcontext = ssl.create_default_context(cafile=self._ca_filename)
 
         async with self.session.post(
-            construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
-            headers=self._get_request_headers(),
-            json=json,
-            ssl=sslcontext if self._verify_certificate else False
+                construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
+                headers=self._get_request_headers(),
+                json=json,
+                ssl=sslcontext if self._verify_certificate else False
         ) as response:
             if response.ok:
                 response = await response.json()
             else:
                 response = await response.text()
         return response
@@ -645,10 +646,10 @@ async def async_run_hosting_capacity_work_package(self, work_package: WorkPackag
             sslcontext = ssl.create_default_context(cafile=self._ca_filename)
 
         async with self.session.post(
-            construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
-            headers=self._get_request_headers(),
-            json=json,
-            ssl=sslcontext if self._verify_certificate else False
+                construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
+                headers=self._get_request_headers(),
+                json=json,
+                ssl=sslcontext if self._verify_certificate else False
         ) as response:
             if response.ok:
                 response = await response.json()
             else:
                 response = await response.text()
         return response
@@ -687,10 +688,10 @@ async def async_cancel_hosting_capacity_work_package(self, work_package_id: str)
             sslcontext = ssl.create_default_context(cafile=self._ca_filename)
 
         async with self.session.post(
-            construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
-            headers=self._get_request_headers(),
-            json=json,
-            ssl=sslcontext if self._verify_certificate else False
+                construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
+                headers=self._get_request_headers(),
+                json=json,
+                ssl=sslcontext if self._verify_certificate else False
         ) as response:
             if response.ok:
                 response = await response.json()
             else:
                 response = await response.text()
         return response
@@ -739,10 +740,66 @@ async def async_get_hosting_capacity_work_packages_progress(self):
             sslcontext = ssl.create_default_context(cafile=self._ca_filename)
 
         async with self.session.post(
-            construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
-            headers=self._get_request_headers(),
-            json=json,
-            ssl=sslcontext if self._verify_certificate else False
+                construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
+                headers=self._get_request_headers(),
+                json=json,
+                ssl=sslcontext if self._verify_certificate else False
         ) as response:
             if response.ok:
                 response = await response.json()
             else:
                 response = await response.text()
         return response
+
+    def run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput):
+        """
+        Send a request to the Evolve App Server to run a feeder load analysis study
+
+        :param feeder_load_analysis_input: An instance of the `FeederLoadAnalysisInput` data class representing the configuration for the run
+        :return: The HTTP response received from the Evolve App Server after requesting the feeder load analysis report
+        """
+        return get_event_loop().run_until_complete(
+            self.async_run_feeder_load_analysis_report(feeder_load_analysis_input))
+
+    async def async_run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput):
+        """
+        Asynchronously send a request to the Evolve App Server to run a feeder load analysis study
+
+        :return: The HTTP response received from the Evolve
App Server after requesting a feeder load analysis report + """ + with warnings.catch_warnings(): + if not self._verify_certificate: + warnings.filterwarnings("ignore", category=InsecureRequestWarning) + json = { + "query": + """ + mutation runFeederLoadAnalysis($input: FeederLoadAnalysisInput!) { + runFeederLoadAnalysis(input: $input) + } + """, + "variables": { + "input": { + "feeders": feeder_load_analysis_input.feeders, + "startDate": feeder_load_analysis_input.startDate, + "endDate": feeder_load_analysis_input.endDate, + "fetchLvNetwork": feeder_load_analysis_input.fetchLvNetwork, + "processFeederLoads": feeder_load_analysis_input.processFeederLoads, + "processCoincidentLoads": feeder_load_analysis_input.processCoincidentLoads, + "produceBasicReport": feeder_load_analysis_input.produceBasicReport, + "produceConductorReport": feeder_load_analysis_input.produceConductorReport, + "aggregateAtFeederLevel": feeder_load_analysis_input.aggregateAtFeederLevel, + "output": feeder_load_analysis_input.output + } + } + } + if self._verify_certificate: + sslcontext = ssl.create_default_context(cafile=self._ca_filename) + + async with self.session.post( + construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), + headers=self._get_request_headers(), + json=json, + ssl=sslcontext if self._verify_certificate else False ) as response: if response.ok: response = await response.json() @@ -804,10 +861,10 @@ async def async_upload_study(self, study: Study): sslcontext = ssl.create_default_context(cafile=self._ca_filename) async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False + construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), + headers=self._get_request_headers(), + json=json, + ssl=sslcontext if self._verify_certificate else False ) as response: if response.ok: response = await response.json() @@ -852,10 +909,10 @@ async def async_run_hosting_capacity_calibration(self, calibration_name: str, sslcontext = ssl.create_default_context(cafile=self._ca_filename) async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False + construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), + headers=self._get_request_headers(), + json=json, + ssl=sslcontext if self._verify_certificate else False ) as response: if response.ok: response = await response.json() @@ -903,10 +960,10 @@ async def async_get_hosting_capacity_calibration_run(self, id: str): sslcontext = ssl.create_default_context(cafile=self._ca_filename) async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False + construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), + headers=self._get_request_headers(), + json=json, + ssl=sslcontext if self._verify_certificate else False ) as response: if response.ok: response = await response.json() @@ -940,10 +997,10 @@ async def async_get_hosting_capacity_calibration_sets(self): sslcontext = ssl.create_default_context(cafile=self._ca_filename) 
async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False + construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), + headers=self._get_request_headers(), + json=json, + ssl=sslcontext if self._verify_certificate else False ) as response: if response.ok: response = await response.json() @@ -1094,10 +1151,10 @@ async def async_run_opendss_export(self, config: OpenDssConfig): sslcontext = ssl.create_default_context(cafile=self._ca_filename) async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False + construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), + headers=self._get_request_headers(), + json=json, + ssl=sslcontext if self._verify_certificate else False ) as response: if response.ok: response = await response.json() @@ -1106,11 +1163,11 @@ async def async_run_opendss_export(self, config: OpenDssConfig): return response def get_paged_opendss_models( - self, - limit: Optional[int] = None, - offset: Optional[int] = None, - query_filter: Optional[GetOpenDssModelsFilterInput] = None, - query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None): + self, + limit: Optional[int] = None, + offset: Optional[int] = None, + query_filter: Optional[GetOpenDssModelsFilterInput] = None, + query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None): """ Retrieve a paginated opendss export run information :param limit: The number of opendss export runs to retrieve @@ -1123,11 +1180,11 @@ def get_paged_opendss_models( self.async_get_paged_opendss_models(limit, offset, query_filter, query_sort)) async def async_get_paged_opendss_models( - self, - limit: Optional[int] = None, - offset: Optional[int] = None, - query_filter: Optional[GetOpenDssModelsFilterInput] = None, - query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None): + self, + limit: Optional[int] = None, + offset: Optional[int] = None, + query_filter: Optional[GetOpenDssModelsFilterInput] = None, + query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None): """ Retrieve a paginated opendss export run information :param limit: The number of opendss export runs to retrieve @@ -1281,10 +1338,10 @@ async def async_get_paged_opendss_models( sslcontext = ssl.create_default_context(cafile=self._ca_filename) async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False + construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), + headers=self._get_request_headers(), + json=json, + ssl=sslcontext if self._verify_certificate else False ) as response: if response.ok: response = await response.json() @@ -1314,11 +1371,11 @@ async def async_get_opendss_model_download_url(self, run_id: int): sslcontext = ssl.create_default_context(cafile=self._ca_filename) async with self.session.get( - construct_url(protocol=self._protocol, host=self._host, port=self._port, - path=f"/api/opendss-model/{run_id}"), - headers=self._get_request_headers(), - ssl=sslcontext if self._verify_certificate else False, - 
allow_redirects=False
+                construct_url(protocol=self._protocol, host=self._host, port=self._port,
+                              path=f"/api/opendss-model/{run_id}"),
+                headers=self._get_request_headers(),
+                ssl=sslcontext if self._verify_certificate else False,
+                allow_redirects=False
         ) as response:
             if response.status == HTTPStatus.FOUND:
                 response = response.headers["Location"]
diff --git a/src/zepben/eas/client/feeder_load_analysis_input.py b/src/zepben/eas/client/feeder_load_analysis_input.py
new file mode 100644
index 0000000..8ee690a
--- /dev/null
+++ b/src/zepben/eas/client/feeder_load_analysis_input.py
@@ -0,0 +1,55 @@
+# Copyright 2020 Zeppelin Bend Pty Ltd
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+from dataclasses import dataclass
+from typing import List, Optional
+
+__all__ = [
+    "FeederLoadAnalysisInput"
+]
+
+
+@dataclass
+class FeederLoadAnalysisInput:
+    """ A data class representing the configuration for a feeder load analysis study """
+
+    feeders: Optional[List[str]]
+    """The mRIDs of feeders to solve for feeder load analysis"""
+
+    substations: Optional[List[str]]
+    """The mRIDs of substations to solve for feeder load analysis"""
+
+    subGeographicalRegions: Optional[List[str]]
+    """The mRIDs of sub-geographical regions to solve for feeder load analysis"""
+
+    geographicalRegions: Optional[List[str]]
+    """The mRIDs of geographical regions to solve for feeder load analysis"""
+
+    startDate: str
+    """Start date for this analysis"""
+
+    endDate: str
+    """End date for this analysis"""
+
+    fetchLvNetwork: bool
+    """Whether to fetch the LV network rather than stopping the analysis at the distribution transformer"""
+
+    processFeederLoads: bool
+    """Whether to include values corresponding to feeder event time points in the report"""
+
+    processCoincidentLoads: bool
+    """Whether to include values corresponding to conductor event time points in the report"""
+
+    produceBasicReport: bool
+    """Whether to produce a basic report"""
+
+    produceConductorReport: bool
+    """Whether to produce the extensive conductor report"""
+
+    aggregateAtFeederLevel: bool
+    """Whether to aggregate all downstream load at the feeder level in the report"""
+
+    output: str
+    """The file name of the resulting study"""
diff --git a/src/zepben/eas/client/work_package.py b/src/zepben/eas/client/work_package.py
index 66ec675..bef9e00 100644
--- a/src/zepben/eas/client/work_package.py
+++ b/src/zepben/eas/client/work_package.py
@@ -848,6 +848,50 @@ class WorkPackageConfig:
     """Configuration for applying an intervention"""
 
 
+@dataclass
+class FeederLoadAnalysisConfig:
+    """ A data class representing the configuration for a feeder load analysis study """
+
+    feeders: Optional[List[str]]
+    """The mRIDs of feeders to solve for feeder load analysis"""
+
+    substations: Optional[List[str]]
+    """The mRIDs of substations to solve for feeder load analysis"""
+
+    subGeographicalRegions: Optional[List[str]]
+    """The mRIDs of sub-geographical regions to solve for feeder load analysis"""
+
+    geographicalRegions: Optional[List[str]]
+    """The mRIDs of geographical regions to solve for feeder load analysis"""
+
+    startDate: str
+    """Start date for this analysis"""
+
+    endDate: str
+    """End date for this analysis"""
+
+    fetchLvNetwork: bool
+    """Whether to fetch the LV network rather than stopping the analysis at the distribution transformer"""
+
+    processFeederLoads: bool
+    """Whether to include values corresponding to feeder event time points in the report"""
+
+    processCoincidentLoads: bool
+    """Whether
to include values corresponding to conductor event time points in the report"""
+
+    produceBasicReport: bool
+    """Whether to produce a basic report"""
+
+    produceConductorReport: bool
+    """Whether to produce the extensive conductor report"""
+
+    aggregateAtFeederLevel: bool
+    """Whether to aggregate all downstream load at the feeder level in the report"""
+
+    output: str
+    """The file name of the resulting study"""
+
+
 @dataclass
 class WorkPackageProgress:
     id: str

From 32d5c848a262a6e2f0dfb91a89f4e5e80f517a9f Mon Sep 17 00:00:00 2001
From: Jimmy Tung
Date: Mon, 14 Jul 2025 09:48:53 +1000
Subject: [PATCH 2/3] fix variables not being passed through in
 feeder_load_analysis_input, change naming convention to fit Python standards,
 and remove redundant data class in work_package.py.

Signed-off-by: Jimmy Tung
---
 src/zepben/eas/client/eas_client.py | 19 ++++----
 .../eas/client/feeder_load_analysis_input.py | 20 ++++-----
 src/zepben/eas/client/work_package.py | 44 -------
 3 files changed, 21 insertions(+), 62 deletions(-)

diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py
index 8573559..6a20ae2 100644
--- a/src/zepben/eas/client/eas_client.py
+++ b/src/zepben/eas/client/eas_client.py
@@ -780,14 +780,17 @@ async def async_run_feeder_load_analysis_report(self, feeder_load_analysis_input
             "variables": {
                 "input": {
                     "feeders": feeder_load_analysis_input.feeders,
-                    "startDate": feeder_load_analysis_input.startDate,
-                    "endDate": feeder_load_analysis_input.endDate,
-                    "fetchLvNetwork": feeder_load_analysis_input.fetchLvNetwork,
-                    "processFeederLoads": feeder_load_analysis_input.processFeederLoads,
-                    "processCoincidentLoads": feeder_load_analysis_input.processCoincidentLoads,
-                    "produceBasicReport": feeder_load_analysis_input.produceBasicReport,
-                    "produceConductorReport": feeder_load_analysis_input.produceConductorReport,
-                    "aggregateAtFeederLevel": feeder_load_analysis_input.aggregateAtFeederLevel,
+                    "substations": feeder_load_analysis_input.substations,
+                    "subGeographicalRegions": feeder_load_analysis_input.sub_geographical_regions,
+                    "geographicalRegions": feeder_load_analysis_input.geographical_regions,
+                    "startDate": feeder_load_analysis_input.start_date,
+                    "endDate": feeder_load_analysis_input.end_date,
+                    "fetchLvNetwork": feeder_load_analysis_input.fetch_lv_network,
+                    "processFeederLoads": feeder_load_analysis_input.process_feeder_loads,
+                    "processCoincidentLoads": feeder_load_analysis_input.process_coincident_loads,
+                    "produceBasicReport": feeder_load_analysis_input.produce_basic_report,
+                    "produceConductorReport": feeder_load_analysis_input.produce_conductor_report,
+                    "aggregateAtFeederLevel": feeder_load_analysis_input.aggregate_at_feeder_level,
                     "output": feeder_load_analysis_input.output
                 }
             }
diff --git a/src/zepben/eas/client/feeder_load_analysis_input.py b/src/zepben/eas/client/feeder_load_analysis_input.py
index 8ee690a..ec8fd0f 100644
--- a/src/zepben/eas/client/feeder_load_analysis_input.py
+++ b/src/zepben/eas/client/feeder_load_analysis_input.py
@@ -21,34 +21,34 @@ class FeederLoadAnalysisInput:
     substations: Optional[List[str]]
     """The mRIDs of substations to solve for feeder load analysis"""
 
-    subGeographicalRegions: Optional[List[str]]
+    sub_geographical_regions: Optional[List[str]]
     """The mRIDs of sub-geographical regions to solve for feeder load analysis"""
 
-    geographicalRegions: Optional[List[str]]
+    geographical_regions: Optional[List[str]]
     """The mRIDs of geographical regions to solve for feeder load analysis"""
 
-    startDate: str
+    start_date: str
     """Start date for this
analysis""" - endDate: str + end_date: str """End date for this analysis""" - fetchLvNetwork: bool + fetch_lv_network: bool """Whether to stop analysis at distribution transformer""" - processFeederLoads: bool + process_feeder_loads: bool """Whether to include values corresponding to feeder event time points in the report""" - processCoincidentLoads: bool + process_coincident_loads: bool """Whether to include values corresponding to conductor event time points in the report""" - produceBasicReport: bool + produce_basic_report: bool """Request for a basic report""" - produceConductorReport: bool + produce_conductor_report: bool """Request for an extensive report""" - aggregateAtFeederLevel: bool + aggregate_at_feeder_level: bool """Request for a report which aggregate all downstream load at the feeder level""" output: str diff --git a/src/zepben/eas/client/work_package.py b/src/zepben/eas/client/work_package.py index bef9e00..66ec675 100644 --- a/src/zepben/eas/client/work_package.py +++ b/src/zepben/eas/client/work_package.py @@ -848,50 +848,6 @@ class WorkPackageConfig: """Configuration for applying an intervention""" -@dataclass -class FeederLoadAnalysisConfig: - """ A data class representing the configuration for a feeder load analysis study """ - - feeders : Optional[List[str]] - """The mRIDs of feeders to solve for feeder load analysis""" - - substations : Optional[List[str]] - """The mRIDs of substations to solve for feeder load analysis""" - - subGeographicalRegions : Optional[List[str]] - """The mRIDs of sub-Geographical Region to solve for feeder load analysis""" - - geographicalRegions : Optional[List[str]] - """The mRIDs of Geographical Region to solve for feeder load analysis""" - - startDate : str - """Start date for this analysis""" - - endDate : str - """End date for this analysis""" - - fetchLvNetwork : bool - """Whether to stop analysis at distribution transformer""" - - processFeederLoads : bool - """Whether to include values corresponding to feeder event time points in the report""" - - processCoincidentLoads : bool - """Whether to include values corresponding to conductor event time points in the report""" - - produceBasicReport : bool - """Request for a basic report""" - - produceConductorReport : bool - """Request for an extensive report""" - - aggregateAtFeederLevel : bool - """Request for a report which aggregate all downstream load at the feeder level""" - - output : str - """The file name of the resulting study""" - - @dataclass class WorkPackageProgress: id: str From 404a2832bad0f9205bf333e92b226ca000b3fecd Mon Sep 17 00:00:00 2001 From: Jimmy Tung Date: Fri, 18 Jul 2025 14:09:22 +1000 Subject: [PATCH 3/3] remove process_basic_report and produce_conductor_report variable from feeder_load_analysis_input as we do not offer basic report for now. 
Signed-off-by: Jimmy Tung
---
 src/zepben/eas/client/eas_client.py | 3 +--
 src/zepben/eas/client/feeder_load_analysis_input.py | 6 ------
 2 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py
index 6a20ae2..02d2bca 100644
--- a/src/zepben/eas/client/eas_client.py
+++ b/src/zepben/eas/client/eas_client.py
@@ -788,8 +788,7 @@ async def async_run_feeder_load_analysis_report(self, feeder_load_analysis_input
                     "fetchLvNetwork": feeder_load_analysis_input.fetch_lv_network,
                     "processFeederLoads": feeder_load_analysis_input.process_feeder_loads,
                     "processCoincidentLoads": feeder_load_analysis_input.process_coincident_loads,
-                    "produceBasicReport": feeder_load_analysis_input.produce_basic_report,
-                    "produceConductorReport": feeder_load_analysis_input.produce_conductor_report,
+                    "produceConductorReport": True,  # We currently only support the conductor report
                     "aggregateAtFeederLevel": feeder_load_analysis_input.aggregate_at_feeder_level,
                     "output": feeder_load_analysis_input.output
                 }
diff --git a/src/zepben/eas/client/feeder_load_analysis_input.py b/src/zepben/eas/client/feeder_load_analysis_input.py
index ec8fd0f..4b6dd31 100644
--- a/src/zepben/eas/client/feeder_load_analysis_input.py
+++ b/src/zepben/eas/client/feeder_load_analysis_input.py
@@ -42,12 +42,6 @@ class FeederLoadAnalysisInput:
     process_coincident_loads: bool
     """Whether to include values corresponding to conductor event time points in the report"""
 
-    produce_basic_report: bool
-    """Whether to produce a basic report"""
-
-    produce_conductor_report: bool
-    """Whether to produce the extensive conductor report"""
-
     aggregate_at_feeder_level: bool
     """Whether to aggregate all downstream load at the feeder level in the report"""
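
Reviewer note: below is a minimal usage sketch of the API added by this series, reflecting the state after PATCH 3/3. It is illustrative only; the host, token, feeder mRID, date strings, and output name are placeholders, and the date format expected by the server is an assumption (ISO dates) rather than something specified in this change.

    from zepben.eas.client.eas_client import EasClient
    from zepben.eas.client.feeder_load_analysis_input import FeederLoadAnalysisInput

    # Placeholder connection details; substitute a real EAS host and token.
    client = EasClient(host="eas.example.com", port=443, access_token="<token>")

    # Every field is required by the dataclass; pass None for the scoping
    # lists you are not using (the mutation forwards them all).
    analysis_input = FeederLoadAnalysisInput(
        feeders=["example-feeder-mrid"],  # placeholder mRID
        substations=None,
        sub_geographical_regions=None,
        geographical_regions=None,
        start_date="2025-01-01",  # assumed ISO format
        end_date="2025-01-31",    # assumed ISO format
        fetch_lv_network=False,
        process_feeder_loads=True,
        process_coincident_loads=True,
        aggregate_at_feeder_level=False,
        output="feeder_load_analysis_result",  # placeholder output name
    )

    # Blocking helper; async_run_feeder_load_analysis_report is the awaitable
    # variant. Note the client now always sends produceConductorReport=True,
    # as only the conductor report is supported for now.
    response = client.run_feeder_load_analysis_report(analysis_input)
    print(response)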