Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion changelog.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@
* `FixedTimeLoadOverride` now takes in optional list of floats instead of optional float for its variable.

### New Features
* None.
* Added `FeederLoadAnalysisInput` which holds the config for feeder-load-analysis studies.
  * These can then be utilized via the `run_feeder_load_analysis_report` and `async_run_feeder_load_analysis_report` functions.

### Enhancements
* None.
Expand Down
197 changes: 128 additions & 69 deletions src/zepben/eas/client/eas_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from urllib3.exceptions import InsecureRequestWarning
from zepben.auth import AuthMethod, ZepbenTokenFetcher, create_token_fetcher, create_token_fetcher_managed_identity

from zepben.eas.client.feeder_load_analysis_input import FeederLoadAnalysisInput
from zepben.eas.client.opendss import OpenDssConfig, GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput
from zepben.eas.client.study import Study
from zepben.eas.client.util import construct_url
Expand All @@ -30,20 +31,20 @@ class EasClient:
"""

def __init__(
self,
host: str,
port: int,
protocol: str = "https",
client_id: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
access_token: Optional[str] = None,
client_secret: Optional[str] = None,
token_fetcher: Optional[ZepbenTokenFetcher] = None,
verify_certificate: bool = True,
ca_filename: Optional[str] = None,
session: ClientSession = None,
json_serialiser=None
self,
host: str,
port: int,
protocol: str = "https",
client_id: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
access_token: Optional[str] = None,
client_secret: Optional[str] = None,
token_fetcher: Optional[ZepbenTokenFetcher] = None,
verify_certificate: bool = True,
ca_filename: Optional[str] = None,
session: ClientSession = None,
json_serialiser=None
):
"""
Construct a client for the Evolve App Server. If the server is HTTPS, authentication may be configured.
Expand Down Expand Up @@ -414,10 +415,10 @@ async def async_get_work_package_cost_estimation(self, work_package: WorkPackage
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.post(
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
) as response:
if response.ok:
response = await response.json()
Expand Down Expand Up @@ -645,10 +646,10 @@ async def async_run_hosting_capacity_work_package(self, work_package: WorkPackag
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.post(
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
) as response:
if response.ok:
response = await response.json()
Expand Down Expand Up @@ -687,10 +688,10 @@ async def async_cancel_hosting_capacity_work_package(self, work_package_id: str)
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.post(
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
) as response:
if response.ok:
response = await response.json()
Expand Down Expand Up @@ -739,10 +740,68 @@ async def async_get_hosting_capacity_work_packages_progress(self):
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.post(
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
) as response:
if response.ok:
response = await response.json()
else:
response = await response.text()
return response

def run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput):
    """
    Send request to evolve app server to run a feeder load analysis study.

    Blocking wrapper: drives `async_run_feeder_load_analysis_report` to completion
    on the current event loop.

    :param feeder_load_analysis_input: An instance of the `FeederLoadAnalysisInput` data class representing the configuration for the run
    :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report
    """
    return get_event_loop().run_until_complete(
        self.async_run_feeder_load_analysis_report(feeder_load_analysis_input))

async def async_run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput):
    """
    Asynchronously send request to evolve app server to run a feeder load analysis study.

    :param feeder_load_analysis_input: An instance of the `FeederLoadAnalysisInput` data class representing the configuration for the run
    :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report
    """
    with warnings.catch_warnings():
        if not self._verify_certificate:
            warnings.filterwarnings("ignore", category=InsecureRequestWarning)
        json = {
            "query":
                """
                mutation runFeederLoadAnalysis($input: FeederLoadAnalysisInput!) {
                    runFeederLoadAnalysis(input: $input)
                }
                """,
            "variables": {
                "input": {
                    "feeders": feeder_load_analysis_input.feeders,
                    "substations": feeder_load_analysis_input.substations,
                    "subGeographicalRegions": feeder_load_analysis_input.sub_geographical_regions,
                    # Fixed: this was previously populated from `feeders`, which silently sent the
                    # feeder selection as the geographical-region selection.
                    "geographicalRegions": feeder_load_analysis_input.geographical_regions,
                    "startDate": feeder_load_analysis_input.start_date,
                    "endDate": feeder_load_analysis_input.end_date,
                    "fetchLvNetwork": feeder_load_analysis_input.fetch_lv_network,
                    "processFeederLoads": feeder_load_analysis_input.process_feeder_loads,
                    "processCoincidentLoads": feeder_load_analysis_input.process_coincident_loads,
                    "produceConductorReport": True,  # We currently only support conductor report
                    "aggregateAtFeederLevel": feeder_load_analysis_input.aggregate_at_feeder_level,
                    "output": feeder_load_analysis_input.output
                }
            }
        }
        if self._verify_certificate:
            sslcontext = ssl.create_default_context(cafile=self._ca_filename)

        async with self.session.post(
                construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
                headers=self._get_request_headers(),
                json=json,
                ssl=sslcontext if self._verify_certificate else False
        ) as response:
            if response.ok:
                response = await response.json()
Expand Down Expand Up @@ -804,10 +863,10 @@ async def async_upload_study(self, study: Study):
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.post(
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
) as response:
if response.ok:
response = await response.json()
Expand Down Expand Up @@ -852,10 +911,10 @@ async def async_run_hosting_capacity_calibration(self, calibration_name: str,
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.post(
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
) as response:
if response.ok:
response = await response.json()
Expand Down Expand Up @@ -903,10 +962,10 @@ async def async_get_hosting_capacity_calibration_run(self, id: str):
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.post(
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
) as response:
if response.ok:
response = await response.json()
Expand Down Expand Up @@ -940,10 +999,10 @@ async def async_get_hosting_capacity_calibration_sets(self):
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.post(
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
) as response:
if response.ok:
response = await response.json()
Expand Down Expand Up @@ -1094,10 +1153,10 @@ async def async_run_opendss_export(self, config: OpenDssConfig):
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.post(
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
) as response:
if response.ok:
response = await response.json()
Expand All @@ -1106,11 +1165,11 @@ async def async_run_opendss_export(self, config: OpenDssConfig):
return response

def get_paged_opendss_models(
self,
limit: Optional[int] = None,
offset: Optional[int] = None,
query_filter: Optional[GetOpenDssModelsFilterInput] = None,
query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None):
self,
limit: Optional[int] = None,
offset: Optional[int] = None,
query_filter: Optional[GetOpenDssModelsFilterInput] = None,
query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None):
"""
Retrieve a paginated opendss export run information
:param limit: The number of opendss export runs to retrieve
Expand All @@ -1123,11 +1182,11 @@ def get_paged_opendss_models(
self.async_get_paged_opendss_models(limit, offset, query_filter, query_sort))

async def async_get_paged_opendss_models(
self,
limit: Optional[int] = None,
offset: Optional[int] = None,
query_filter: Optional[GetOpenDssModelsFilterInput] = None,
query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None):
self,
limit: Optional[int] = None,
offset: Optional[int] = None,
query_filter: Optional[GetOpenDssModelsFilterInput] = None,
query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None):
"""
Retrieve a paginated opendss export run information
:param limit: The number of opendss export runs to retrieve
Expand Down Expand Up @@ -1281,10 +1340,10 @@ async def async_get_paged_opendss_models(
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.post(
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
headers=self._get_request_headers(),
json=json,
ssl=sslcontext if self._verify_certificate else False
) as response:
if response.ok:
response = await response.json()
Expand Down Expand Up @@ -1314,11 +1373,11 @@ async def async_get_opendss_model_download_url(self, run_id: int):
sslcontext = ssl.create_default_context(cafile=self._ca_filename)

async with self.session.get(
construct_url(protocol=self._protocol, host=self._host, port=self._port,
path=f"/api/opendss-model/{run_id}"),
headers=self._get_request_headers(),
ssl=sslcontext if self._verify_certificate else False,
allow_redirects=False
construct_url(protocol=self._protocol, host=self._host, port=self._port,
path=f"/api/opendss-model/{run_id}"),
headers=self._get_request_headers(),
ssl=sslcontext if self._verify_certificate else False,
allow_redirects=False
) as response:
if response.status == HTTPStatus.FOUND:
response = response.headers["Location"]
Expand Down
49 changes: 49 additions & 0 deletions src/zepben/eas/client/feeder_load_analysis_input.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Copyright 2020 Zeppelin Bend Pty Ltd
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from dataclasses import dataclass
from typing import List, Optional

__all__ = [
"FeederLoadAnalysisInput"
]


@dataclass
class FeederLoadAnalysisInput:
    """A data class representing the configuration for a feeder load analysis study."""

    feeders: Optional[List[str]]
    """The mRIDs of feeders to solve for feeder load analysis"""

    substations: Optional[List[str]]
    """The mRIDs of substations to solve for feeder load analysis"""

    sub_geographical_regions: Optional[List[str]]
    """The mRIDs of sub-geographical regions to solve for feeder load analysis"""

    geographical_regions: Optional[List[str]]
    """The mRIDs of geographical regions to solve for feeder load analysis"""

    start_date: str
    """Start date for this analysis"""

    end_date: str
    """End date for this analysis"""

    fetch_lv_network: bool
    """Whether to stop analysis at distribution transformer"""

    process_feeder_loads: bool
    """Whether to include values corresponding to feeder event time points in the report"""

    process_coincident_loads: bool
    """Whether to include values corresponding to conductor event time points in the report
    (NOTE(review): wording looks copy-pasted — presumably coincident-load time points; confirm against the server)"""

    aggregate_at_feeder_level: bool
    """Request for a report which aggregates all downstream load at the feeder level"""

    output: str
    """The file name of the resulting study"""