Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,9 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: "3.12"
python-version: "3.14"
cache: 'pip'
cache-dependency-path: 'requirements-dev.txt'
cache-dependency-path: 'requirements-all.txt'
- name: Install Ruff
run: |
python -m pip install --upgrade pip
Expand All @@ -26,7 +26,7 @@ jobs:
build:
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12"]
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
platform: [octave]
os: [ubuntu-latest]

Expand All @@ -41,7 +41,7 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
cache-dependency-path: 'requirements-dev.txt'
cache-dependency-path: 'requirements-all.txt'

- name: Install Octave (Linux)
if: matrix.platform == 'octave'
Expand Down
10 changes: 5 additions & 5 deletions .github/workflows/publish.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ jobs:
build:
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12"]
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
platform: [octave]
os: [ubuntu-latest]

Expand All @@ -22,7 +22,7 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
cache-dependency-path: 'requirements-dev.txt'
cache-dependency-path: 'requirements-all.txt'

- name: Install Octave (Linux)
if: matrix.platform == 'octave'
Expand All @@ -45,12 +45,12 @@ jobs:
- name: Clone this repository
uses: actions/checkout@v3

- name: Set up Python 3.12
- name: Set up Python 3.14
uses: actions/setup-python@v4
with:
python-version: 3.12
python-version: 3.14
cache: 'pip'
cache-dependency-path: 'requirements-dev.txt'
cache-dependency-path: 'requirements-all.txt'

- name: Install build dependencies
run: |
Expand Down
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -144,3 +144,6 @@ dmypy.json

# Mac
.DS_Store

# test
tests/data/*.xlsx
7 changes: 3 additions & 4 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
default_stages: [commit]
default_stages: [pre-commit]

repos:
# check yaml and end of file fixer
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
rev: v6.0.0
hooks:
- id: check-yaml
- id: end-of-file-fixer
Expand All @@ -12,7 +12,7 @@ repos:
# autofix using ruff
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.5.0
rev: v0.14.10
hooks:
# Run the linter.
- id: ruff
Expand All @@ -21,4 +21,3 @@ repos:
# Run the formatter.
- id: ruff-format
types_or: [ python, pyi, jupyter ]
# args: [ --verbose ]
17 changes: 16 additions & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,19 @@
# Contirbuting
# Contributing

## env

```sh
uv venv env --python 3.14
source env/bin/activate
uv pip install pip
```

## packages

```sh
pip install pru
pru -r requirements-all.txt
```

## Install in development mode

Expand Down
129 changes: 100 additions & 29 deletions matpowercaseframes/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,37 +78,46 @@ def _read_data(
# TODO: support Path
# TYPE: str of path
path = self._get_path(data)
path_no_ext, ext = os.path.splitext(path)

if ext == ".m":
# read `.m` file
if load_case_engine is None:
# read with matpower parser
self._read_matpower(
filepath=path,
allow_any_keys=allow_any_keys,
)
else:
# read using loadcase
mpc = load_case_engine.loadcase(path)
self._read_oct2py_struct(
struct=mpc,
allow_any_keys=allow_any_keys,
)
elif ext == ".xlsx":
# read `.xlsx` file
self._read_excel(
filepath=path,
# check if path is a directory (for CSV files)
if os.path.isdir(path):
self._read_csv_dir(
dirpath=path,
prefix=prefix,
suffix=suffix,
allow_any_keys=allow_any_keys,
)
self.name = os.path.basename(path_no_ext)
self.name = os.path.basename(path)
else:
# TODO: support read directory of csv for schema and .csv data
message = f"Can't find data at {data}"
raise FileNotFoundError(message)

path_no_ext, ext = os.path.splitext(path)

if ext == ".m":
# read `.m` file
if load_case_engine is None:
# read with matpower parser
self._read_matpower(
filepath=path,
allow_any_keys=allow_any_keys,
)
else:
# read using loadcase
mpc = load_case_engine.loadcase(path)
self._read_oct2py_struct(
struct=mpc,
allow_any_keys=allow_any_keys,
)
elif ext == ".xlsx":
# read `.xlsx` file
self._read_excel(
filepath=path,
prefix=prefix,
suffix=suffix,
allow_any_keys=allow_any_keys,
)
self.name = os.path.basename(path_no_ext)
else:
message = f"Can't find data at {os.path.abspath(data)}"
raise FileNotFoundError(message)
elif isinstance(data, dict):
# TYPE: dict | oct2py.io.Struct
self._read_oct2py_struct(
Expand Down Expand Up @@ -162,15 +171,17 @@ def _get_path(path):
Determine the correct file path for the given input.

Args:
path (str): File path or MATPOWER case name.
path (str): File path, directory path, or MATPOWER case name.

Returns:
str: Resolved file path.
str: Resolved file path or directory path.

Raises:
FileNotFoundError: If the file or MATPOWER case cannot be found.
"""
# TODO: support read directory of csv for schema and .csv data
# directory exist on path (for CSV directory)
if os.path.isdir(path):
return path

# file exist on path
if os.path.isfile(path):
Expand Down Expand Up @@ -198,7 +209,9 @@ def _get_path(path):
if os.path.isfile(path_added_matpower_m):
return path_added_matpower_m

raise FileNotFoundError
# Create detailed error message
error_msg = f"Could not find file or directory '{path}'."
raise FileNotFoundError(error_msg)

def _read_matpower(self, filepath, allow_any_keys=False):
"""
Expand Down Expand Up @@ -333,6 +346,64 @@ def _read_excel(self, filepath, prefix="", suffix="", allow_any_keys=False):

self.setattr(attribute, value)

def _read_csv_dir(self, dirpath, prefix="", suffix="", allow_any_keys=False):
    """
    Load case data from a directory of per-attribute CSV files.

    Each ``<prefix><attribute><suffix>.csv`` file in *dirpath* is read into
    a DataFrame and stored via ``self.setattr``. A special ``info`` CSV, if
    present, supplies the ``version`` and ``baseMVA`` scalars.

    Args:
        dirpath (str): Directory path containing the CSV files.
        prefix (str): File prefix stripped from each file stem to recover
            the attribute name.
        suffix (str): File suffix stripped from each file stem to recover
            the attribute name.
        allow_any_keys (bool): Whether to allow attributes beyond ATTRIBUTES.
    """
    # Build a mapping of attribute name -> CSV file path.
    attribute_paths = {}
    for entry in os.listdir(dirpath):
        if not entry.endswith(".csv"):
            continue

        # Strip the '.csv' extension, then any configured prefix/suffix,
        # to recover the attribute name.
        stem = entry[:-4]
        if prefix and stem.startswith(prefix):
            stem = stem[len(prefix) :]
        if suffix and stem.endswith(suffix):
            stem = stem[: -len(suffix)]

        attribute_paths[stem] = os.path.join(dirpath, entry)

    self._attributes = []

    # The 'info' CSV carries general metadata (version, baseMVA).
    info_name = "info"
    if info_name in attribute_paths:
        info_frame = pd.read_csv(attribute_paths[info_name], index_col=0)

        # MATPOWER case version is stored as a string.
        self.setattr("version", str(info_frame.loc["version", "INFO"].item()))
        self.setattr("baseMVA", info_frame.loc["baseMVA", "INFO"].item())

    # Load every remaining attribute CSV.
    for attribute, csv_path in attribute_paths.items():
        # Metadata already handled above.
        if attribute == info_name:
            continue

        # Unknown attributes are skipped unless explicitly allowed.
        if not allow_any_keys and attribute not in ATTRIBUTES:
            continue

        frame = pd.read_csv(csv_path, index_col=0)

        if attribute in ("bus_name", "branch_name", "gen_name"):
            # Name tables round-trip back into a pandas Index.
            value = pd.Index(frame[attribute].values.tolist(), name=attribute)
        else:
            value = frame

        self.setattr(attribute, value)

def _get_dataframe(self, attribute, data, n_cols=None, columns_template=None):
"""
Create a DataFrame with proper columns from raw data.
Expand Down
Loading