3 changes: 2 additions & 1 deletion input/runtime.txt
@@ -24,4 +24,5 @@ Input_NH4 0
Input_PO4 0
direct 0.95
dist 1
interval 1
interval 1
output_CSV 1
1 change: 1 addition & 0 deletions pyproject.toml
@@ -7,6 +7,7 @@ requires-python = ">=3.12"
dependencies = [
"pandas>=2.2.3",
"scipy>=1.15.2",
"xarray>=2025.12.0"
]

[dependency-groups]
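One note on the new dependency: xarray's to_netcdf needs a netCDF backend. Neither netcdf4 nor h5netcdf is listed here, so xarray should fall back to the already-present scipy engine, which writes netCDF3 files. A minimal environment check, as a sketch (the file name is arbitrary):

```python
import xarray as xr

# Round-trip a tiny dataset to confirm a netCDF backend is available.
# With only scipy installed this produces a netCDF3 file; adding netcdf4
# or h5netcdf to the dependencies enables netCDF4 output.
ds = xr.Dataset({"x": ("i", [1.0, 2.0, 3.0])})
ds.to_netcdf("backend_check.nc")
print(xr.open_dataset("backend_check.nc"))
```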
6 changes: 5 additions & 1 deletion src/dementpy.py
@@ -45,6 +45,7 @@ def main():
pulse = int(runtime.loc['pulse',1]) # number of pulses
cycle = int(runtime.loc['end_time',1]) # number of time steps in each pulse
interval = int(runtime.loc['interval',1]) # interval of time step to record outputs
Export_format = int(runtime.loc['output_CSV',1]) # output format: 1 for CSV, otherwise NetCDF
mode = int(runtime.loc['dispersal',1]) # 0:'default' or 1:'dispersal'

#...Initialize data by calling the Function: Initialize_Data()
@@ -101,7 +102,10 @@ def main():

#...export the Output_init object to the output_folder in the format selected by output_CSV
os.chdir('../'+output_folder)
export(Output_init, site, outname)
if Export_format == 1:
Output_init.export_to_csv(outname)
else:
Output_init.export_to_netcdf(outname)

if __name__ == '__main__':
main()
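For context, runtime is the two-column name/value table read from input/runtime.txt, so the new output_CSV key is looked up the same way as the existing parameters. A minimal sketch of that lookup (the read_csv call is an assumption; DEMENTpy's own loader may differ):

```python
import pandas as pd

# runtime.txt holds whitespace-separated "name value" pairs; with header=None and
# index_col=0 the remaining column is labelled 1, matching runtime.loc[..., 1] above.
runtime = pd.read_csv("input/runtime.txt", header=None, index_col=0, sep=r"\s+")
export_format = int(runtime.loc["output_CSV", 1])  # 1 -> CSV export, anything else -> NetCDF
```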
135 changes: 134 additions & 1 deletion src/initialization.py
@@ -5,6 +5,12 @@
"""
import pandas as pd
import numpy as np
import xarray as xr

import warnings
import numbers
from pathlib import Path
from typing import Union

from substrate import Substrate
from monomer import Monomer
@@ -163,4 +169,131 @@ def initialize_data(runtime_parameters, site):
'Psi': daily_psi # water potential
}

return Data_Dictionary
return Data_Dictionary


def export_initialization_dict_to_csv(base_path: Path | str, d: dict) -> None:
"""Export contents of the initialisation directory to a folder.

Writes each of the items of a type below to a separate CSV file
- pandas.DataFrame
- pandas.Series
- numpy.ndarray of rank below 2
All scalar numbers are grouped in a single CSV 'scalars.csv' file.

Note:
All other items are ignored following a warning!
If you need them written you need to add extra entry.
"""

# Create space for output
base_path = Path(base_path)
base_path.mkdir(parents=True, exist_ok=True)

# Collect all scalar numbers
scalar_numbers = dict()

for name, member in d.items():
if isinstance(member, (pd.DataFrame, pd.Series)):
fname = name + ".csv"
member.to_csv(base_path / fname)
elif isinstance(member, np.ndarray):
if len(member.shape) <= 2:
fname = name + ".csv"
np.savetxt(base_path / fname, member, delimiter=",")
else:
warnings.warn(
f"Member '{name}' of initialisation dictionary could not be saved since "
f"it is an array of rank higher than 2 (rank: {len(member.shape)})."
)
elif isinstance(member, numbers.Number):
scalar_numbers[name] = member
else:
warnings.warn(
f"Initialisation member '{name}' has unsupported type '{type(member)}'. "
f"It has not been exported to the output directory '{base_path}'."
)

# Write all collected scalar numbers to a single CSV file
pd.Series(scalar_numbers).to_csv(base_path / "scalars.csv")
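A small usage sketch of the helper above, with a hand-made dictionary standing in for the real initialisation data (the member names and target directory are illustrative):

```python
import numpy as np
import pandas as pd

demo = {
    "Substrates": pd.DataFrame({"C": [10.0, 5.0], "N": [1.0, 0.5]}),  # -> Substrates.csv
    "Psi": np.linspace(-0.1, -2.0, 365),                              # 1-D array -> Psi.csv
    "n_taxa": 100,                                                    # scalar -> collected into scalars.csv
}
export_initialization_dict_to_csv("output/Initialization_demo", demo)
```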

def export_initialization_dict_to_netcdf(base_path: Path | str, d: dict) -> None:
"""Export contents of the initialisation dictionary to a directory in NetCDF format.
- Each pandas.DataFrame item is saved to a separate .nc file.
- All pandas.Series items are combined and saved to a single 'series.nc' file.
- All scalar numerical items are grouped and saved to 'scalars.nc'.

Parameters:
base_path : Path
A path that names the root directory where contents will be exported.
If the directory does not exist it will be created.
d : dict
The initialisation dictionary returned by initialize_data().
"""
# Create space for output
base_path = Path(base_path)
base_path.mkdir(parents=True, exist_ok=True)

# Collect all series and scalar data
series_data = dict()
scalar_numbers = dict()

for name, member in d.items():
# convert each DataFrame to an xarray Dataset and save to .nc
if isinstance(member, pd.DataFrame):
# Ensure column and index names are strings so they survive the NetCDF round trip
member.columns = member.columns.astype(str)
if member.index.name is not None:
member.index.name = str(member.index.name)
fname = name + ".nc"  # use the .nc extension
try:
xarray_member = xr.Dataset.from_dataframe(member)
xarray_member.to_netcdf(base_path / fname)
except Exception as e:
warnings.warn(
f"Could not export DataFrame '{name}' to NetCDF. Error: {e}"
)

elif isinstance(member, pd.Series):
series_data[name] = member

elif isinstance(member, numbers.Number):
scalar_numbers[name] = member

else:
warnings.warn(
f"Initialisation member '{name}' has unsupported type '{type(member)}'. "
f"It has not been exported to the output directory '{base_path}'."
)

# process and save Series
if series_data:
try:
# Combine all Series into a single DataFrame.
combined_series_df = pd.concat(series_data, axis=1)
# Convert the combined DataFrame to an xarray Dataset.
series_dataset = xr.Dataset.from_dataframe(combined_series_df)
# Save the Series Dataset to a single NetCDF file.
series_dataset.to_netcdf(base_path / "series.nc")
except ValueError as e:
# This handles the "duplicate labels" error if it occurs.
warnings.warn(
f"Could not export combined series due to an error: {e}. "
"Consider cleaning the index of the Series data first."
)

if scalar_numbers:
# Create an xarray Dataset directly from the dictionary of scalars.
# Each key becomes a variable in the NetCDF file.
scalars_dataset = xr.Dataset(scalar_numbers)
# Save the scalars Dataset to a NetCDF file.
scalars_dataset.to_netcdf(base_path / "scalars.nc")
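And the NetCDF counterpart, driven by the dictionary that initialize_data() returns (runtime_parameters and site are whatever the caller already holds; the path is illustrative). Note that ndarray items such as 'Psi' currently fall through to the unsupported-type warning here, unlike the CSV helper:

```python
import xarray as xr

data_init = initialize_data(runtime_parameters, site)
export_initialization_dict_to_netcdf("output/Initialization", data_init)

# Scalar items come back as zero-dimensional variables of the dataset.
scalars = xr.open_dataset("output/Initialization/scalars.nc")
print(scalars)
```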

137 changes: 137 additions & 0 deletions src/output.py
@@ -1,8 +1,16 @@
# output.py module dealing with outputs of DEMENTpy.
# Bin Wang, January, 2020

from pathlib import Path
import warnings
import numbers

from initialization import export_initialization_dict_to_csv
from initialization import export_initialization_dict_to_netcdf

import numpy as np
import pandas as pd
import xarray as xr

class Output():
"""
@@ -293,3 +301,132 @@ def microbes_tradeoff(self, ecosystem, year, day):
GY_grid = ecosystem.Microbe_C_Gain.groupby(level=0,sort=False).sum()
GY_grid.name = self.cycle*year + (day+1)
self.Growth_yield = pd.concat([self.Growth_yield,GY_grid],axis=1,sort=False)


def export_to_csv(self, base_path: Path | str) -> None:
"""Export contents of the output file to a directory.

Exports each class member of type pandas.DataFrame to a separate CSV file.
All pandas.Series members are combined in a DataFrame and printed dto 'series.csv' file.
Similarly all scalar numerical members are grouped in 'scalars.csv'.

Parameters:
base_path : Path
A path that names the root directory where contents will be exported.
If the directory does not exist it will be created.
"""
# Create space for output
base_path = Path(base_path)
base_path.mkdir(parents=True, exist_ok=True)

# Collect all series and scalar data
# We will dump them at the end
series_data = dict()
scalar_numbers = dict()

for name, member in vars(self).items():
if isinstance(member, pd.DataFrame):
fname = name + ".csv"
member.to_csv(base_path / fname)
elif isinstance(member, pd.Series):
series_data[name] = member
elif isinstance(member, numbers.Number):
scalar_numbers[name] = member
elif name == "Initialization":
# Special case - Initialization dictionary
# Serialise it to a subfolder
path = base_path / name
export_initialization_dict_to_csv(path, member)
else:
warnings.warn(
f"Output member '{name}' has unsupported type '{type(member)}'. "
f"It has not been exported to the output directory '{base_path}'."
)

# If Series have different lengths they are padded with missing values (NaNs).
if series_data:
series_data = pd.concat(series_data, axis=1)
series_data.to_csv(base_path / "series.csv")

# Write all collected scalar numbers to a single CSV file
pd.Series(scalar_numbers).to_csv(base_path / "scalars.csv")
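Loading these back needs only pandas; a sketch, where 'outname' stands for whatever directory name was passed to export_to_csv:

```python
import pandas as pd

series = pd.read_csv("outname/series.csv", index_col=0)                       # one column per Series member
scalars = pd.read_csv("outname/scalars.csv", index_col=0).squeeze("columns")  # back to a pandas Series
```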

def export_to_netcdf(self, base_path: Path | str) -> None:
"""Export contents of the output file to a directory in NetCDF format.
- Each pandas.DataFrame member is saved to a separate .nc file.
- All pandas.Series members are combined and saved to a single 'series.nc' file.
- All scalar numerical members are grouped and saved to 'scalars.nc'.

Parameters:
base_path : Path
A path that names the root directory where contents will be exported.
If the directory does not exist it will be created.
"""
# Create space for output
base_path = Path(base_path)
base_path.mkdir(parents=True, exist_ok=True)

# Collect all series and scalar data
series_data = dict()
scalar_numbers = dict()

for name, member in vars(self).items():
# convert each DataFrame to an xarray Dataset and save to .nc
if isinstance(member, pd.DataFrame):
# Ensure column names are strings
member.columns = member.columns.astype(str)
if member.index.name is not None:
member.index.name = str(member.index.name)
fname = name + ".nc" # use the .nc extension
try:
xarray_member = xr.Dataset.from_dataframe(member)
xarray_member.to_netcdf(base_path / fname)
except Exception as e:
warnings.warn(
f"Could not export DataFrame '{name}' to NetCDF. Error: {e}"
)

elif isinstance(member, pd.Series):
series_data[name] = member

elif isinstance(member, numbers.Number):
scalar_numbers[name] = member

elif name == "Initialization":
# Special case - Initialization dictionary
# Serialise it to a subfolder
path = base_path / name
export_initialization_dict_to_netcdf(path, member)

else:
warnings.warn(
f"Output member '{name}' has unsupported type '{type(member)}'. "
f"It has not been exported to the output directory '{base_path}'."
)

# process and save Series
if series_data:
try:
# Combine all Series into a single DataFrame.
combined_series_df = pd.concat(series_data, axis=1)
# Convert the combined DataFrame to an xarray Dataset.
series_dataset = xr.Dataset.from_dataframe(combined_series_df)
# Save the Series Dataset to a single NetCDF file.
series_dataset.to_netcdf(base_path / "series.nc")
except ValueError as e:
# This handles the "duplicate labels" error if it occurs.
warnings.warn(
f"Could not export combined series due to an error: {e}. "
"Consider cleaning the index of your Series data first."
)

if scalar_numbers:
# Create an xarray Dataset directly from the dictionary of scalars.
# Each key will become a variable in the NetCDF file.
scalars_dataset = xr.Dataset(scalar_numbers)
# Save the scalars Dataset to a NetCDF file.
scalars_dataset.to_netcdf(base_path / "scalars.nc")
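For the NetCDF route, xarray reads everything back and converts to pandas where needed; a sketch, with 'outname' again standing for the directory passed to export_to_netcdf (cycle is one of the numeric attributes exported to scalars.nc):

```python
import xarray as xr

series = xr.open_dataset("outname/series.nc").to_dataframe()   # combined Series members as a DataFrame
scalars = xr.open_dataset("outname/scalars.nc")                 # scalars as zero-dimensional variables
print(int(scalars["cycle"]))                                    # e.g. the cycle length recorded by the run
```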